* [PATCH 00/10] add hairpin queue support
@ 2023-04-21 6:50 beilei.xing
2023-04-21 6:50 ` [PATCH 01/10] net/cpfl: refine structures beilei.xing
From: beilei.xing @ 2023-04-21 6:50 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patchset adds hairpin queue support.
It depends on the following two patch sets:
https://patches.dpdk.org/project/dpdk/cover/20230406074245.82991-1-beilei.xing@intel.com/
https://patches.dpdk.org/project/dpdk/cover/20230413094502.1714755-1-wenjing.qiao@intel.com/
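For context, below is a rough sketch of the application-level flow this series targets, using the generic ethdev hairpin API. This is an assumed usage example, not part of the patches; port ids and the manual-bind choice are illustrative.

#include <rte_ethdev.h>

/* Assumed two-port hairpin flow with manual bind (illustrative only):
 * hairpin queues are set up before start, and the Tx side is bound to
 * the Rx side after both ports are started (see patches 09/10).
 */
static int
hairpin_ports_start_and_bind(uint16_t rx_port, uint16_t tx_port)
{
	int ret;

	ret = rte_eth_dev_start(rx_port);
	if (ret != 0)
		return ret;
	ret = rte_eth_dev_start(tx_port);
	if (ret != 0)
		return ret;

	/* Bind Tx hairpin queues of tx_port to Rx hairpin queues of rx_port. */
	return rte_eth_hairpin_bind(tx_port, rx_port);
}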
Beilei Xing (10):
net/cpfl: refine structures
net/cpfl: support hairpin queue capability get
common/idpf: support queue groups add/delete
net/cpfl: add hairpin queue group during vport init
net/cpfl: support hairpin queue setup and release
net/cpfl: support hairpin queue configuration
net/cpfl: support hairpin queue start/stop
net/cpfl: enable write back based on ITR expire
net/cpfl: support peer ports get
net/cpfl: support hairpin bind/unbind
drivers/common/idpf/idpf_common_device.c | 75 ++
drivers/common/idpf/idpf_common_device.h | 4 +
drivers/common/idpf/idpf_common_virtchnl.c | 138 +++-
drivers/common/idpf/idpf_common_virtchnl.h | 18 +
drivers/common/idpf/version.map | 6 +
drivers/net/cpfl/cpfl_ethdev.c | 588 +++++++++++++--
drivers/net/cpfl/cpfl_ethdev.h | 33 +-
drivers/net/cpfl/cpfl_rxtx.c | 807 +++++++++++++++++++--
drivers/net/cpfl/cpfl_rxtx.h | 67 ++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 21 +-
10 files changed, 1638 insertions(+), 119 deletions(-)
--
2.26.2
* [PATCH 01/10] net/cpfl: refine structures
2023-04-21 6:50 [PATCH 00/10] add hairpin queue support beilei.xing
@ 2023-04-21 6:50 ` beilei.xing
2023-04-21 6:50 ` [PATCH 02/10] net/cpfl: support hairpin queue capability get beilei.xing
From: beilei.xing @ 2023-04-21 6:50 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch refines some structures to support hairpin queues by
introducing the cpfl_rx_queue, cpfl_tx_queue and cpfl_vport wrappers
around the corresponding idpf structures.
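For clarity, a minimal sketch of the wrapper pattern this patch introduces (simplified from the diff below; the helper function is made up purely for illustration and is not part of the patch):

struct cpfl_rx_queue {
	struct idpf_rx_queue base;  /* common idpf Rx queue kept as the first member */
	/* cpfl-specific (e.g. hairpin) fields are added by later patches */
};

/* Illustrative helper only: driver callbacks now store cpfl_rx_queue
 * pointers in dev->data->rx_queues[] and hand the embedded base to the
 * common idpf code.
 */
static inline struct idpf_rx_queue *
cpfl_rxq_base(void *rxq)
{
	return &((struct cpfl_rx_queue *)rxq)->base;
}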
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 85 +++++++-----
drivers/net/cpfl/cpfl_ethdev.h | 6 +-
drivers/net/cpfl/cpfl_rxtx.c | 175 +++++++++++++++++-------
drivers/net/cpfl/cpfl_rxtx.h | 8 ++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 17 +--
5 files changed, 196 insertions(+), 95 deletions(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 306b8ad769..4a507f05d5 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -124,7 +124,8 @@ static int
cpfl_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct rte_eth_link new_link;
unsigned int i;
@@ -156,7 +157,8 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
static int
cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
dev_info->max_rx_queues = base->caps.max_rx_q;
@@ -216,7 +218,8 @@ cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
static int
cpfl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
/* mtu setting is forbidden if port is start */
if (dev->data->dev_started) {
@@ -256,12 +259,12 @@ static uint64_t
cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
{
uint64_t mbuf_alloc_failed = 0;
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i = 0;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- mbuf_alloc_failed += __atomic_load_n(&rxq->rx_stats.mbuf_alloc_failed,
+ cpfl_rxq = dev->data->rx_queues[i];
+ mbuf_alloc_failed += __atomic_load_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,
__ATOMIC_RELAXED);
}
@@ -271,8 +274,8 @@ cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
static int
cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
int ret;
@@ -305,20 +308,20 @@ cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
static void
cpfl_reset_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- __atomic_store_n(&rxq->rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
+ cpfl_rxq = dev->data->rx_queues[i];
+ __atomic_store_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
}
}
static int
cpfl_dev_stats_reset(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
int ret;
@@ -343,8 +346,8 @@ static int cpfl_dev_xstats_reset(struct rte_eth_dev *dev)
static int cpfl_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned int n)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
unsigned int i;
int ret;
@@ -459,7 +462,8 @@ cpfl_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t idx, shift;
int ret = 0;
@@ -498,7 +502,8 @@ cpfl_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t idx, shift;
int ret = 0;
@@ -536,7 +541,8 @@ static int
cpfl_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
int ret = 0;
@@ -601,7 +607,8 @@ static int
cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
int ret = 0;
@@ -638,7 +645,8 @@ cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,
static int
cpfl_dev_configure(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct rte_eth_conf *conf = &dev->data->dev_conf;
struct idpf_adapter *base = vport->adapter;
int ret;
@@ -710,7 +718,8 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
static int
cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
uint16_t nb_rx_queues = dev->data->nb_rx_queues;
return idpf_vport_irq_map_config(vport, nb_rx_queues);
@@ -719,14 +728,14 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
static int
cpfl_start_queues(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
- struct idpf_tx_queue *txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
int err = 0;
int i;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL || txq->tx_deferred_start)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
continue;
err = cpfl_tx_queue_start(dev, i);
if (err != 0) {
@@ -736,8 +745,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- if (rxq == NULL || rxq->rx_deferred_start)
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
continue;
err = cpfl_rx_queue_start(dev, i);
if (err != 0) {
@@ -752,7 +761,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)
static int
cpfl_dev_start(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(base);
uint16_t num_allocated_vectors = base->caps.num_allocated_vectors;
@@ -815,7 +825,8 @@ cpfl_dev_start(struct rte_eth_dev *dev)
static int
cpfl_dev_stop(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
if (vport->stopped == 1)
return 0;
@@ -836,7 +847,8 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
static int
cpfl_dev_close(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
@@ -846,7 +858,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
adapter->cur_vport_nb--;
dev->data->dev_private = NULL;
adapter->vports[vport->sw_idx] = NULL;
- rte_free(vport);
+ rte_free(cpfl_vport);
return 0;
}
@@ -1051,7 +1063,7 @@ cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id)
int i;
for (i = 0; i < adapter->cur_vport_nb; i++) {
- vport = adapter->vports[i];
+ vport = &adapter->vports[i]->base;
if (vport->vport_id != vport_id)
continue;
else
@@ -1328,7 +1340,8 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter)
static int
cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_vport_param *param = init_params;
struct cpfl_adapter_ext *adapter = param->adapter;
/* for sending create vport virtchnl msg prepare */
@@ -1354,7 +1367,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
goto err;
}
- adapter->vports[param->idx] = vport;
+ adapter->vports[param->idx] = cpfl_vport;
adapter->cur_vports |= RTE_BIT32(param->devarg_id);
adapter->cur_vport_nb++;
@@ -1470,7 +1483,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
snprintf(name, sizeof(name), "cpfl_%s_vport_0",
pci_dev->device.name);
retval = rte_eth_dev_create(&pci_dev->device, name,
- sizeof(struct idpf_vport),
+ sizeof(struct cpfl_vport),
NULL, NULL, cpfl_dev_vport_init,
&vport_param);
if (retval != 0)
@@ -1488,7 +1501,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
pci_dev->device.name,
devargs.req_vports[i]);
retval = rte_eth_dev_create(&pci_dev->device, name,
- sizeof(struct idpf_vport),
+ sizeof(struct cpfl_vport),
NULL, NULL, cpfl_dev_vport_init,
&vport_param);
if (retval != 0)
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 200dfcac02..81fe9ac4c3 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -69,13 +69,17 @@ struct cpfl_devargs {
uint16_t req_vport_nb;
};
+struct cpfl_vport {
+ struct idpf_vport base;
+};
+
struct cpfl_adapter_ext {
TAILQ_ENTRY(cpfl_adapter_ext) next;
struct idpf_adapter base;
char name[CPFL_ADAPTER_NAME_LEN];
- struct idpf_vport **vports;
+ struct cpfl_vport **vports;
uint16_t max_vport_nb;
uint16_t cur_vports; /* bit mask of created vport */
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index de59b31b3d..a441e2ffbe 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -128,7 +128,8 @@ cpfl_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,
uint16_t nb_desc, unsigned int socket_id,
struct rte_mempool *mp, uint8_t bufq_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct idpf_hw *hw = &base->hw;
const struct rte_memzone *mz;
@@ -219,15 +220,69 @@ cpfl_rx_split_bufq_release(struct idpf_rx_queue *bufq)
rte_free(bufq);
}
+static void
+cpfl_rx_queue_release(void *rxq)
+{
+ struct cpfl_rx_queue *cpfl_rxq = rxq;
+ struct idpf_rx_queue *q = NULL;
+
+ if (cpfl_rxq == NULL)
+ return;
+
+ q = &cpfl_rxq->base;
+
+ /* Split queue */
+ if (!q->adapter->is_rx_singleq) {
+ if (q->bufq2)
+ cpfl_rx_split_bufq_release(q->bufq2);
+
+ if (q->bufq1)
+ cpfl_rx_split_bufq_release(q->bufq1);
+
+ rte_free(cpfl_rxq);
+ return;
+ }
+
+ /* Single queue */
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(cpfl_rxq);
+}
+
+static void
+cpfl_tx_queue_release(void *txq)
+{
+ struct cpfl_tx_queue *cpfl_txq = txq;
+ struct idpf_tx_queue *q = NULL;
+
+ if (cpfl_txq == NULL)
+ return;
+
+ q = &cpfl_txq->base;
+
+ if (q->complq) {
+ rte_memzone_free(q->complq->mz);
+ rte_free(q->complq);
+ }
+
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(cpfl_txq);
+}
+
int
cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct idpf_hw *hw = &base->hw;
+ struct cpfl_rx_queue *cpfl_rxq;
const struct rte_memzone *mz;
struct idpf_rx_queue *rxq;
uint16_t rx_free_thresh;
@@ -247,21 +302,23 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Free memory if needed */
if (dev->data->rx_queues[queue_idx] != NULL) {
- idpf_qc_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
dev->data->rx_queues[queue_idx] = NULL;
}
/* Setup Rx queue */
- rxq = rte_zmalloc_socket("cpfl rxq",
- sizeof(struct idpf_rx_queue),
+ cpfl_rxq = rte_zmalloc_socket("cpfl rxq",
+ sizeof(struct cpfl_rx_queue),
RTE_CACHE_LINE_SIZE,
socket_id);
- if (rxq == NULL) {
+ if (cpfl_rxq == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
ret = -ENOMEM;
goto err_rxq_alloc;
}
+ rxq = &cpfl_rxq->base;
+
is_splitq = !!(vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
rxq->mp = mp;
@@ -328,7 +385,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
rxq->q_set = true;
- dev->data->rx_queues[queue_idx] = rxq;
+ dev->data->rx_queues[queue_idx] = cpfl_rxq;
return 0;
@@ -348,7 +405,8 @@ cpfl_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq,
uint16_t queue_idx, uint16_t nb_desc,
unsigned int socket_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
const struct rte_memzone *mz;
struct idpf_tx_queue *cq;
int ret;
@@ -396,9 +454,11 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t tx_rs_thresh, tx_free_thresh;
+ struct cpfl_tx_queue *cpfl_txq;
struct idpf_hw *hw = &base->hw;
const struct rte_memzone *mz;
struct idpf_tx_queue *txq;
@@ -418,21 +478,23 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Free memory if needed. */
if (dev->data->tx_queues[queue_idx] != NULL) {
- idpf_qc_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
dev->data->tx_queues[queue_idx] = NULL;
}
/* Allocate the TX queue data structure. */
- txq = rte_zmalloc_socket("cpfl txq",
- sizeof(struct idpf_tx_queue),
+ cpfl_txq = rte_zmalloc_socket("cpfl txq",
+ sizeof(struct cpfl_tx_queue),
RTE_CACHE_LINE_SIZE,
socket_id);
- if (txq == NULL) {
+ if (cpfl_txq == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
ret = -ENOMEM;
goto err_txq_alloc;
}
+ txq = &cpfl_txq->base;
+
is_splitq = !!(vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
txq->nb_tx_desc = nb_desc;
@@ -486,7 +548,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
queue_idx * vport->chunks_info.tx_qtail_spacing);
txq->ops = &def_txq_ops;
txq->q_set = true;
- dev->data->tx_queues[queue_idx] = txq;
+ dev->data->tx_queues[queue_idx] = cpfl_txq;
return 0;
@@ -502,6 +564,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
+ struct cpfl_rx_queue *cpfl_rxq;
struct idpf_rx_queue *rxq;
uint16_t max_pkt_len;
uint32_t frame_size;
@@ -510,7 +573,8 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
if (rx_queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
- rxq = dev->data->rx_queues[rx_queue_id];
+ cpfl_rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = &cpfl_rxq->base;
if (rxq == NULL || !rxq->q_set) {
PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
@@ -574,9 +638,10 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_rx_queue *rxq =
- dev->data->rx_queues[rx_queue_id];
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq = dev->data->rx_queues[rx_queue_id];
+ struct idpf_rx_queue *rxq = &cpfl_rxq->base;
int err = 0;
err = idpf_vc_rxq_config(vport, rxq);
@@ -609,15 +674,15 @@ cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
if (tx_queue_id >= dev->data->nb_tx_queues)
return -EINVAL;
- txq = dev->data->tx_queues[tx_queue_id];
+ cpfl_txq = dev->data->tx_queues[tx_queue_id];
/* Init the RX tail register. */
- IDPF_PCI_REG_WRITE(txq->qtx_tail, 0);
+ IDPF_PCI_REG_WRITE(cpfl_txq->base.qtx_tail, 0);
return 0;
}
@@ -625,12 +690,13 @@ cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
int
cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_tx_queue *txq =
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_tx_queue *cpfl_txq =
dev->data->tx_queues[tx_queue_id];
int err = 0;
- err = idpf_vc_txq_config(vport, txq);
+ err = idpf_vc_txq_config(vport, &cpfl_txq->base);
if (err != 0) {
PMD_DRV_LOG(ERR, "Fail to configure Tx queue %u", tx_queue_id);
return err;
@@ -649,7 +715,7 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
tx_queue_id);
} else {
- txq->q_started = true;
+ cpfl_txq->base.q_started = true;
dev->data->tx_queue_state[tx_queue_id] =
RTE_ETH_QUEUE_STATE_STARTED;
}
@@ -660,13 +726,16 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
int
cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq;
struct idpf_rx_queue *rxq;
int err;
if (rx_queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
+ cpfl_rxq = dev->data->rx_queues[rx_queue_id];
err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
@@ -674,7 +743,7 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return err;
}
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = &cpfl_rxq->base;
rxq->q_started = false;
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
rxq->ops->release_mbufs(rxq);
@@ -692,13 +761,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_tx_queue *cpfl_txq;
struct idpf_tx_queue *txq;
int err;
if (tx_queue_id >= dev->data->nb_tx_queues)
return -EINVAL;
+ cpfl_txq = dev->data->tx_queues[tx_queue_id];
+
err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
@@ -706,7 +779,7 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
return err;
}
- txq = dev->data->tx_queues[tx_queue_id];
+ txq = &cpfl_txq->base;
txq->q_started = false;
txq->ops->release_mbufs(txq);
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
@@ -723,25 +796,25 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
void
cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- idpf_qc_rx_queue_release(dev->data->rx_queues[qid]);
+ cpfl_rx_queue_release(dev->data->rx_queues[qid]);
}
void
cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- idpf_qc_tx_queue_release(dev->data->tx_queues[qid]);
+ cpfl_tx_queue_release(dev->data->tx_queues[qid]);
}
void
cpfl_stop_queues(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
- struct idpf_tx_queue *txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
int i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- if (rxq == NULL)
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq == NULL)
continue;
if (cpfl_rx_queue_stop(dev, i) != 0)
@@ -749,8 +822,8 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL)
continue;
if (cpfl_tx_queue_stop(dev, i) != 0)
@@ -761,9 +834,10 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
void
cpfl_set_rx_function(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
#ifdef RTE_ARCH_X86
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i;
if (cpfl_rx_vec_dev_check_default(dev) == CPFL_VECTOR_PATH &&
@@ -789,8 +863,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- (void)idpf_qc_splitq_rx_vec_setup(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ (void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
if (vport->rx_use_avx512) {
@@ -809,8 +883,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
} else {
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- (void)idpf_qc_singleq_rx_vec_setup(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ (void)idpf_qc_singleq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
if (vport->rx_use_avx512) {
@@ -859,10 +933,11 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
void
cpfl_set_tx_function(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
#ifdef RTE_ARCH_X86
#ifdef CC_AVX512_SUPPORT
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
int i;
#endif /* CC_AVX512_SUPPORT */
@@ -877,8 +952,8 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
vport->tx_use_avx512 = true;
if (vport->tx_use_avx512) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- idpf_qc_tx_vec_avx512_setup(txq);
+ cpfl_txq = dev->data->tx_queues[i];
+ idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
}
}
}
@@ -915,10 +990,10 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
#ifdef CC_AVX512_SUPPORT
if (vport->tx_use_avx512) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL)
continue;
- idpf_qc_tx_vec_avx512_setup(txq);
+ idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
}
PMD_DRV_LOG(NOTICE,
"Using Single AVX512 Vector Tx (port %d).",
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index fb267d38c8..bfb9ad97bd 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -23,6 +23,14 @@
#define CPFL_SUPPORT_CHAIN_NUM 5
+struct cpfl_rx_queue {
+ struct idpf_rx_queue base;
+};
+
+struct cpfl_tx_queue {
+ struct idpf_tx_queue base;
+};
+
int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
index 665418d27d..5690b17911 100644
--- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
+++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
@@ -76,15 +76,16 @@ cpfl_rx_splitq_vec_default(struct idpf_rx_queue *rxq)
static inline int
cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_rx_queue *rxq;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq;
int i, default_ret, splitq_ret, ret = CPFL_SCALAR_PATH;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- default_ret = cpfl_rx_vec_queue_default(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
- splitq_ret = cpfl_rx_splitq_vec_default(rxq);
+ splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base);
ret = splitq_ret && default_ret;
} else {
ret = default_ret;
@@ -100,12 +101,12 @@ static inline int
cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)
{
int i;
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
int ret = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- ret = cpfl_tx_vec_queue_default(txq);
+ cpfl_txq = dev->data->tx_queues[i];
+ ret = cpfl_tx_vec_queue_default(&cpfl_txq->base);
if (ret == CPFL_SCALAR_PATH)
return CPFL_SCALAR_PATH;
}
--
2.26.2
* [PATCH 02/10] net/cpfl: support hairpin queue capability get
2023-04-21 6:50 [PATCH 00/10] add hairpin queue support beilei.xing
2023-04-21 6:50 ` [PATCH 01/10] net/cpfl: refine structures beilei.xing
@ 2023-04-21 6:50 ` beilei.xing
2023-04-21 6:50 ` [PATCH 03/10] common/idpf: support queue groups add/delete beilei.xing
From: beilei.xing @ 2023-04-21 6:50 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch adds hairpin_cap_get ops support.
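For reference, a minimal sketch of how an application would consume this ops through the generic ethdev API (assumed usage example, not part of the patch):

#include <stdio.h>
#include <rte_ethdev.h>

/* Query hairpin limits before configuring hairpin queues; the fields
 * map 1:1 to the CPFL_MAX_* values reported by cpfl_hairpin_cap_get().
 */
static int
print_hairpin_caps(uint16_t port_id)
{
	struct rte_eth_hairpin_cap cap;
	int ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);

	if (ret != 0)
		return ret;
	printf("max queues %u, rx->tx %u, tx->rx %u, max desc %u\n",
	       cap.max_nb_queues, cap.max_rx_2_tx, cap.max_tx_2_rx,
	       cap.max_nb_desc);
	return 0;
}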
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 13 +++++++++++++
drivers/net/cpfl/cpfl_rxtx.h | 4 ++++
2 files changed, 17 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 4a507f05d5..114fc18f5f 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -154,6 +154,18 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
return rte_eth_linkstatus_set(dev, &new_link);
}
+static int
+cpfl_hairpin_cap_get(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_hairpin_cap *cap)
+{
+ cap->max_nb_queues = CPFL_MAX_P2P_NB_QUEUES;
+ cap->max_rx_2_tx = CPFL_MAX_HAIRPINQ_RX_2_TX;
+ cap->max_tx_2_rx = CPFL_MAX_HAIRPINQ_TX_2_RX;
+ cap->max_nb_desc = CPFL_MAX_HAIRPINQ_NB_DESC;
+
+ return 0;
+}
+
static int
cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
@@ -889,6 +901,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.xstats_get = cpfl_dev_xstats_get,
.xstats_get_names = cpfl_dev_xstats_get_names,
.xstats_reset = cpfl_dev_xstats_reset,
+ .hairpin_cap_get = cpfl_hairpin_cap_get,
};
static int
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index bfb9ad97bd..b2b3537d10 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -13,6 +13,10 @@
#define CPFL_MIN_RING_DESC 32
#define CPFL_MAX_RING_DESC 4096
#define CPFL_DMA_MEM_ALIGN 4096
+#define CPFL_MAX_HAIRPINQ_RX_2_TX 1
+#define CPFL_MAX_HAIRPINQ_TX_2_RX 1
+#define CPFL_MAX_HAIRPINQ_NB_DESC 1024
+#define CPFL_MAX_P2P_NB_QUEUES 16
/* Base address of the HW descriptor ring should be 128B aligned. */
#define CPFL_RING_BASE_ALIGN 128
--
2.26.2
* [PATCH 03/10] common/idpf: support queue groups add/delete
2023-04-21 6:50 [PATCH 00/10] add hairpin queue support beilei.xing
2023-04-21 6:50 ` [PATCH 01/10] net/cpfl: refine structures beilei.xing
2023-04-21 6:50 ` [PATCH 02/10] net/cpfl: support hairpin queue capability get beilei.xing
@ 2023-04-21 6:50 ` beilei.xing
2023-04-24 8:48 ` Liu, Mingxia
2023-04-24 8:49 ` Liu, Mingxia
2023-04-21 6:50 ` [PATCH 04/10] net/cpfl: add hairpin queue group during vport init beilei.xing
From: beilei.xing @ 2023-04-21 6:50 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds queue group add/delete virtual channel support.
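As a usage illustration, a sketch of a driver-side caller deleting one P2P queue group through the new helper. This mirrors the caller added later in this series; the group id value is hard-coded here only for the example (the real caller uses CPFL_P2P_QUEUE_GRP_ID and CPFL_P2P_NB_QUEUE_GRPS).

static int
example_p2p_queue_grps_del(struct idpf_vport *vport)
{
	struct virtchnl2_queue_group_id qg_ids[1] = {0};

	/* Identify the single P2P queue group created at vport init. */
	qg_ids[0].queue_group_id = 1;
	qg_ids[0].queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;

	return idpf_vc_queue_grps_del(vport, 1, qg_ids);
}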
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 66 ++++++++++++++++++++++
drivers/common/idpf/idpf_common_virtchnl.h | 9 +++
drivers/common/idpf/version.map | 2 +
3 files changed, 77 insertions(+)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index a4e129062e..76a658bb26 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -359,6 +359,72 @@ idpf_vc_vport_destroy(struct idpf_vport *vport)
return err;
}
+int
+idpf_vc_queue_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *ptp_queue_grps_info,
+ uint8_t *ptp_queue_grps_out)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_cmd_info args;
+ int size, qg_info_size;
+ int err = -1;
+
+ size = sizeof(*ptp_queue_grps_info) +
+ (ptp_queue_grps_info->qg_info.num_queue_groups - 1) *
+ sizeof(struct virtchnl2_queue_group_info);
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_ADD_QUEUE_GROUPS;
+ args.in_args = (uint8_t *)ptp_queue_grps_info;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ if (err != 0) {
+ DRV_LOG(ERR,
+ "Failed to execute command of VIRTCHNL2_OP_ADD_QUEUE_GROUPS");
+ return err;
+ }
+
+ rte_memcpy(ptp_queue_grps_out, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
+ return 0;
+}
+
+int idpf_vc_queue_grps_del(struct idpf_vport *vport,
+ uint16_t num_q_grps,
+ struct virtchnl2_queue_group_id *qg_ids)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_delete_queue_groups *vc_del_q_grps;
+ struct idpf_cmd_info args;
+ int size;
+ int err;
+
+ size = sizeof(*vc_del_q_grps) +
+ (num_q_grps - 1) * sizeof(struct virtchnl2_queue_group_id);
+ vc_del_q_grps = rte_zmalloc("vc_del_q_grps", size, 0);
+
+ vc_del_q_grps->vport_id = vport->vport_id;
+ vc_del_q_grps->num_queue_groups = num_q_grps;
+ memcpy(vc_del_q_grps->qg_ids, qg_ids,
+ num_q_grps * sizeof(struct virtchnl2_queue_group_id));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_DEL_QUEUE_GROUPS;
+ args.in_args = (uint8_t *)vc_del_q_grps;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_DEL_QUEUE_GROUPS");
+
+ rte_free(vc_del_q_grps);
+ return err;
+}
+
int
idpf_vc_rss_key_set(struct idpf_vport *vport)
{
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index d479d93c8e..bf1d014c8d 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -64,4 +64,13 @@ int idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
__rte_internal
int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 *buff_count, struct idpf_dma_mem **buffs);
+__rte_internal
+int idpf_vc_queue_grps_del(struct idpf_vport *vport,
+ uint16_t num_q_grps,
+ struct virtchnl2_queue_group_id *qg_ids);
+__rte_internal
+int
+idpf_vc_queue_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *ptp_queue_grps_info,
+ uint8_t *ptp_queue_grps_out);
#endif /* _IDPF_COMMON_VIRTCHNL_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 7076759024..aa67f7ee27 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -48,6 +48,8 @@ INTERNAL {
idpf_vc_irq_map_unmap_config;
idpf_vc_one_msg_read;
idpf_vc_ptype_info_query;
+ idpf_vc_queue_grps_add;
+ idpf_vc_queue_grps_del;
idpf_vc_queue_switch;
idpf_vc_queues_ena_dis;
idpf_vc_rss_hash_get;
--
2.26.2
* [PATCH 04/10] net/cpfl: add hairpin queue group during vport init
2023-04-21 6:50 [PATCH 00/10] add hairpin queue support beilei.xing
2023-04-21 6:50 ` [PATCH 03/10] common/idpf: support queue groups add/delete beilei.xing
@ 2023-04-21 6:50 ` beilei.xing
2023-04-24 8:55 ` Liu, Mingxia
2023-04-21 6:50 ` [PATCH 05/10] net/cpfl: support hairpin queue setup and release beilei.xing
From: beilei.xing @ 2023-04-21 6:50 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds a hairpin (P2P) queue group during vport init.
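For illustration, a small sketch of how the chunk info captured here is meant to be consumed. The arithmetic mirrors the cpfl_hw_qid_get()/cpfl_hw_qtail_get() helpers added in a later patch of this series; the function names below are made up for the example.

/* Map a logical P2P Rx queue index to its absolute HW queue id and
 * tail register offset using the parsed p2p_q_chunks_info.
 */
static inline uint32_t
example_p2p_rx_qid(struct cpfl_vport *cpfl_vport, uint16_t logic_qid)
{
	return cpfl_vport->p2p_q_chunks_info.rx_start_qid + logic_qid;
}

static inline uint64_t
example_p2p_rx_qtail(struct cpfl_vport *cpfl_vport, uint16_t logic_qid)
{
	struct p2p_queue_chunks_info *info = &cpfl_vport->p2p_q_chunks_info;

	return info->rx_qtail_start + logic_qid * info->rx_qtail_spacing;
}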
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 125 +++++++++++++++++++++++++++++++++
drivers/net/cpfl/cpfl_ethdev.h | 17 +++++
drivers/net/cpfl/cpfl_rxtx.h | 4 ++
3 files changed, 146 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 114fc18f5f..ad5ddebd3a 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -856,6 +856,20 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
return 0;
}
+static int
+cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
+{
+ struct virtchnl2_queue_group_id qg_ids[CPFL_P2P_NB_QUEUE_GRPS] = {0};
+ int ret = 0;
+
+ qg_ids[0].queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
+ qg_ids[0].queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
+ ret = idpf_vc_queue_grps_del(vport, CPFL_P2P_NB_QUEUE_GRPS, qg_ids);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to delete p2p queue groups");
+ return ret;
+}
+
static int
cpfl_dev_close(struct rte_eth_dev *dev)
{
@@ -864,6 +878,9 @@ cpfl_dev_close(struct rte_eth_dev *dev)
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
+
+ cpfl_p2p_queue_grps_del(vport);
+
idpf_vport_deinit(vport);
adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
@@ -1350,6 +1367,96 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter)
return vport_idx;
}
+static int
+cpfl_p2p_q_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
+ uint8_t *p2p_q_vc_out_info)
+{
+ int ret;
+
+ p2p_queue_grps_info->vport_id = vport->vport_id;
+ p2p_queue_grps_info->qg_info.num_queue_groups = CPFL_P2P_NB_QUEUE_GRPS;
+ p2p_queue_grps_info->qg_info.groups[0].num_rx_q = CPFL_MAX_P2P_NB_QUEUES;
+ p2p_queue_grps_info->qg_info.groups[0].num_rx_bufq = CPFL_P2P_NB_RX_BUFQ;
+ p2p_queue_grps_info->qg_info.groups[0].num_tx_q = CPFL_MAX_P2P_NB_QUEUES;
+ p2p_queue_grps_info->qg_info.groups[0].num_tx_complq = CPFL_P2P_NB_TX_COMPLQ;
+ p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
+ p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
+ p2p_queue_grps_info->qg_info.groups[0].rx_q_grp_info.rss_lut_size = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.tx_tc = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.priority = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.is_sp = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.pir_weight = 0;
+
+ ret = idpf_vc_queue_grps_add(vport, p2p_queue_grps_info, p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to add p2p queue groups.");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
+ struct virtchnl2_add_queue_groups *p2p_q_vc_out_info)
+{
+ struct p2p_queue_chunks_info *p2p_q_chunks_info = &cpfl_vport->p2p_q_chunks_info;
+ struct virtchnl2_queue_reg_chunks *vc_chunks_out;
+ int i, type;
+
+ if (p2p_q_vc_out_info->qg_info.groups[0].qg_id.queue_group_type !=
+ VIRTCHNL2_QUEUE_GROUP_P2P) {
+ PMD_DRV_LOG(ERR, "Add queue group response mismatch.");
+ return -EINVAL;
+ }
+
+ vc_chunks_out = &p2p_q_vc_out_info->qg_info.groups[0].chunks;
+
+ for (i = 0; i < vc_chunks_out->num_chunks; i++) {
+ type = vc_chunks_out->chunks[i].type;
+ switch (type) {
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ p2p_q_chunks_info->tx_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->tx_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->tx_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ p2p_q_chunks_info->rx_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->rx_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->rx_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ p2p_q_chunks_info->tx_compl_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->tx_compl_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->tx_compl_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ p2p_q_chunks_info->rx_buf_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->rx_buf_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->rx_buf_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported queue type");
+ break;
+ }
+ }
+
+ return 0;
+}
+
static int
cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
{
@@ -1359,6 +1466,8 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
struct cpfl_adapter_ext *adapter = param->adapter;
/* for sending create vport virtchnl msg prepare */
struct virtchnl2_create_vport create_vport_info;
+ struct virtchnl2_add_queue_groups p2p_queue_grps_info;
+ uint8_t p2p_q_vc_out_info[IDPF_DFLT_MBX_BUF_SIZE] = {0};
int ret = 0;
dev->dev_ops = &cpfl_eth_dev_ops;
@@ -1380,6 +1489,19 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
goto err;
}
+ memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
+ ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to add p2p queue group.");
+ goto err_q_grps_add;
+ }
+ ret = cpfl_p2p_queue_info_init(cpfl_vport,
+ (struct virtchnl2_add_queue_groups *)p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to init p2p queue info.");
+ goto err_p2p_qinfo_init;
+ }
+
adapter->vports[param->idx] = cpfl_vport;
adapter->cur_vports |= RTE_BIT32(param->devarg_id);
adapter->cur_vport_nb++;
@@ -1397,6 +1519,9 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
return 0;
err_mac_addrs:
+err_p2p_qinfo_init:
+ cpfl_p2p_queue_grps_del(vport);
+err_q_grps_add:
adapter->vports[param->idx] = NULL; /* reset */
idpf_vport_deinit(vport);
adapter->cur_vports &= ~RTE_BIT32(param->devarg_id);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 81fe9ac4c3..5e2e7a1bfb 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -69,8 +69,25 @@ struct cpfl_devargs {
uint16_t req_vport_nb;
};
+struct p2p_queue_chunks_info {
+ uint32_t tx_start_qid;
+ uint32_t rx_start_qid;
+ uint32_t tx_compl_start_qid;
+ uint32_t rx_buf_start_qid;
+
+ uint64_t tx_qtail_start;
+ uint32_t tx_qtail_spacing;
+ uint64_t rx_qtail_start;
+ uint32_t rx_qtail_spacing;
+ uint64_t tx_compl_qtail_start;
+ uint32_t tx_compl_qtail_spacing;
+ uint64_t rx_buf_qtail_start;
+ uint32_t rx_buf_qtail_spacing;
+};
+
struct cpfl_vport {
struct idpf_vport base;
+ struct p2p_queue_chunks_info p2p_q_chunks_info;
};
struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index b2b3537d10..3a87a1f4b3 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -17,6 +17,10 @@
#define CPFL_MAX_HAIRPINQ_TX_2_RX 1
#define CPFL_MAX_HAIRPINQ_NB_DESC 1024
#define CPFL_MAX_P2P_NB_QUEUES 16
+#define CPFL_P2P_NB_RX_BUFQ 1
+#define CPFL_P2P_NB_TX_COMPLQ 1
+#define CPFL_P2P_NB_QUEUE_GRPS 1
+#define CPFL_P2P_QUEUE_GRP_ID 1
/* Base address of the HW descriptor ring should be 128B aligned. */
#define CPFL_RING_BASE_ALIGN 128
--
2.26.2
* [PATCH 05/10] net/cpfl: support hairpin queue setup and release
2023-04-21 6:50 [PATCH 00/10] add hairpin queue support beilei.xing
2023-04-21 6:50 ` [PATCH 04/10] net/cpfl: add hairpin queue group during vport init beilei.xing
@ 2023-04-21 6:50 ` beilei.xing
2023-04-21 6:50 ` [PATCH 06/10] net/cpfl: support hairpin queue configuration beilei.xing
From: beilei.xing @ 2023-04-21 6:50 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
Support hairpin Rx/Tx queue setup and release.
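As a usage note, a minimal sketch of how an application could create one hairpin Rx/Tx queue pair on the same port with these ops (assumed example, not part of the patch; queue ids and descriptor count are arbitrary, manual_bind/tx_explicit are left at their defaults):

#include <rte_ethdev.h>

/* Set up one Rx->Tx hairpin pair on a single port before dev_start.
 * Each queue names the other as its single peer.
 */
static int
setup_hairpin_pair(uint16_t port_id, uint16_t rxq_id, uint16_t txq_id)
{
	struct rte_eth_hairpin_conf conf = { .peer_count = 1 };
	int ret;

	conf.peers[0].port = port_id;
	conf.peers[0].queue = txq_id;
	ret = rte_eth_rx_hairpin_queue_setup(port_id, rxq_id, 1024, &conf);
	if (ret != 0)
		return ret;

	conf.peers[0].queue = rxq_id;
	return rte_eth_tx_hairpin_queue_setup(port_id, txq_id, 1024, &conf);
}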
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 6 +
drivers/net/cpfl/cpfl_ethdev.h | 10 +
drivers/net/cpfl/cpfl_rxtx.c | 373 +++++++++++++++++++++++-
drivers/net/cpfl/cpfl_rxtx.h | 28 ++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 4 +
5 files changed, 420 insertions(+), 1 deletion(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index ad5ddebd3a..d3300f17cc 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -878,6 +878,10 @@ cpfl_dev_close(struct rte_eth_dev *dev)
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
+ if (cpfl_vport->p2p_mp) {
+ rte_mempool_free(cpfl_vport->p2p_mp);
+ cpfl_vport->p2p_mp = NULL;
+ }
cpfl_p2p_queue_grps_del(vport);
@@ -919,6 +923,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.xstats_get_names = cpfl_dev_xstats_get_names,
.xstats_reset = cpfl_dev_xstats_reset,
.hairpin_cap_get = cpfl_hairpin_cap_get,
+ .rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
+ .tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
};
static int
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 5e2e7a1bfb..2cc8790da0 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -88,6 +88,16 @@ struct p2p_queue_chunks_info {
struct cpfl_vport {
struct idpf_vport base;
struct p2p_queue_chunks_info p2p_q_chunks_info;
+
+ struct rte_mempool *p2p_mp;
+
+ uint16_t nb_data_rxq;
+ uint16_t nb_data_txq;
+ uint16_t nb_p2p_rxq;
+ uint16_t nb_p2p_txq;
+
+ struct idpf_rx_queue *p2p_rx_bufq;
+ struct idpf_tx_queue *p2p_tx_complq;
};
struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index a441e2ffbe..64ed331a6d 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -10,6 +10,79 @@
#include "cpfl_rxtx.h"
#include "cpfl_rxtx_vec_common.h"
+uint16_t
+cpfl_hw_qid_get(uint16_t start_qid, uint16_t offset)
+{
+ return start_qid + offset;
+}
+
+uint64_t
+cpfl_hw_qtail_get(uint64_t tail_start, uint16_t offset, uint64_t tail_spacing)
+{
+ return tail_start + offset * tail_spacing;
+}
+
+static inline void
+cpfl_tx_hairpin_descq_reset(struct idpf_tx_queue *txq)
+{
+ uint32_t i, size;
+
+ if (!txq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+ return;
+ }
+
+ size = txq->nb_tx_desc * CPFL_P2P_DESC_LEN;
+ for (i = 0; i < size; i++)
+ ((volatile char *)txq->desc_ring)[i] = 0;
+}
+
+static inline void
+cpfl_tx_hairpin_complq_reset(struct idpf_tx_queue *cq)
+{
+ uint32_t i, size;
+
+ if (!cq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to complq is NULL");
+ return;
+ }
+
+ size = cq->nb_tx_desc * CPFL_P2P_DESC_LEN;
+ for (i = 0; i < size; i++)
+ ((volatile char *)cq->compl_ring)[i] = 0;
+}
+
+static inline void
+cpfl_rx_hairpin_descq_reset(struct idpf_rx_queue *rxq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxq)
+ return;
+
+ len = rxq->nb_rx_desc;
+ for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
+ ((volatile char *)rxq->rx_ring)[i] = 0;
+}
+
+static inline void
+cpfl_rx_hairpin_bufq_reset(struct idpf_rx_queue *rxbq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxbq)
+ return;
+
+ len = rxbq->nb_rx_desc;
+ for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
+ ((volatile char *)rxbq->rx_ring)[i] = 0;
+
+ rxbq->bufq1 = NULL;
+ rxbq->bufq2 = NULL;
+}
+
static uint64_t
cpfl_rx_offload_convert(uint64_t offload)
{
@@ -233,7 +306,10 @@ cpfl_rx_queue_release(void *rxq)
/* Split queue */
if (!q->adapter->is_rx_singleq) {
- if (q->bufq2)
+ /* the mz is shared between Tx/Rx hairpin, let Rx_release
+ * free the buf, q->bufq1->mz and q->mz.
+ */
+ if (!cpfl_rxq->hairpin_info.hairpin_q && q->bufq2)
cpfl_rx_split_bufq_release(q->bufq2);
if (q->bufq1)
@@ -384,6 +460,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
}
+ cpfl_vport->nb_data_rxq++;
rxq->q_set = true;
dev->data->rx_queues[queue_idx] = cpfl_rxq;
@@ -547,6 +624,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
queue_idx * vport->chunks_info.tx_qtail_spacing);
txq->ops = &def_txq_ops;
+ cpfl_vport->nb_data_txq++;
txq->q_set = true;
dev->data->tx_queues[queue_idx] = cpfl_txq;
@@ -561,6 +639,297 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return ret;
}
+static int
+cpfl_rx_hairpin_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,
+ uint16_t logic_qid, uint16_t nb_desc)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct rte_mempool *mp;
+ char pool_name[RTE_MEMPOOL_NAMESIZE];
+
+ mp = cpfl_vport->p2p_mp;
+ if (!mp) {
+ snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "p2p_mb_pool_%u",
+ dev->data->port_id);
+ mp = rte_pktmbuf_pool_create(pool_name, CPFL_P2P_NB_MBUF, CPFL_P2P_CACHE_SIZE,
+ 0, CPFL_P2P_MBUF_SIZE, dev->device->numa_node);
+ if (!mp) {
+ PMD_INIT_LOG(ERR, "Failed to allocate mbuf pool for p2p");
+ return -ENOMEM;
+ }
+ cpfl_vport->p2p_mp = mp;
+ }
+
+ bufq->mp = mp;
+ bufq->nb_rx_desc = nb_desc;
+ bufq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.rx_buf_start_qid, logic_qid);
+ bufq->port_id = dev->data->port_id;
+ bufq->adapter = adapter;
+ bufq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
+
+ bufq->sw_ring = rte_zmalloc("sw ring",
+ sizeof(struct rte_mbuf *) * nb_desc,
+ RTE_CACHE_LINE_SIZE);
+ if (!bufq->sw_ring) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
+ return -ENOMEM;
+ }
+
+ bufq->q_set = true;
+ bufq->ops = &def_rxq_ops;
+
+ return 0;
+}
+
+int
+cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf)
+{
+ struct cpfl_vport *cpfl_vport = (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter_base = vport->adapter;
+ uint16_t logic_qid = cpfl_vport->nb_p2p_rxq;
+ struct cpfl_rxq_hairpin_info *hairpin_info;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct idpf_rx_queue *bufq1 = NULL;
+ struct idpf_rx_queue *rxq;
+ uint16_t peer_port, peer_q;
+ uint16_t qid;
+ int ret;
+
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin queue.");
+ return -EINVAL;
+ }
+
+ if (conf->peer_count != 1) {
+ PMD_INIT_LOG(ERR, "Can't support Rx hairpin queue peer count %d", conf->peer_count);
+ return -EINVAL;
+ }
+
+ peer_port = conf->peers[0].port;
+ peer_q = conf->peers[0].queue;
+
+ if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
+ nb_desc > CPFL_MAX_RING_DESC ||
+ nb_desc < CPFL_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is invalid", nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory if needed */
+ if (dev->data->rx_queues[queue_idx]) {
+ cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* Setup Rx description queue */
+ cpfl_rxq = rte_zmalloc_socket("cpfl hairpin rxq",
+ sizeof(struct cpfl_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!cpfl_rxq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
+ return -ENOMEM;
+ }
+
+ rxq = &cpfl_rxq->base;
+ hairpin_info = &cpfl_rxq->hairpin_info;
+ rxq->nb_rx_desc = nb_desc * 2;
+ rxq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.rx_start_qid, logic_qid);
+ rxq->port_id = dev->data->port_id;
+ rxq->adapter = adapter_base;
+ rxq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
+ hairpin_info->hairpin_q = true;
+ hairpin_info->peer_txp = peer_port;
+ hairpin_info->peer_txq_id = peer_q;
+
+ if (conf->manual_bind != 0)
+ hairpin_info->manual_bind = true;
+ else
+ hairpin_info->manual_bind = false;
+
+ /* setup 1 Rx buffer queue for the 1st hairpin rxq */
+ if (logic_qid == 0) {
+ bufq1 = rte_zmalloc_socket("hairpin rx bufq1",
+ sizeof(struct idpf_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!bufq1) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for hairpin Rx buffer queue 1.");
+ ret = -ENOMEM;
+ goto err_alloc_bufq1;
+ }
+ qid = 2 * logic_qid;
+ ret = cpfl_rx_hairpin_bufq_setup(dev, bufq1, qid, nb_desc);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to setup hairpin Rx buffer queue 1");
+ ret = -EINVAL;
+ goto err_setup_bufq1;
+ }
+ cpfl_vport->p2p_rx_bufq = bufq1;
+ }
+
+ rxq->bufq1 = cpfl_vport->p2p_rx_bufq;
+ rxq->bufq2 = NULL;
+
+ cpfl_vport->nb_p2p_rxq++;
+ rxq->q_set = true;
+ dev->data->rx_queues[queue_idx] = cpfl_rxq;
+
+ return 0;
+
+err_setup_bufq1:
+ rte_free(bufq1);
+err_alloc_bufq1:
+ rte_free(rxq);
+
+ return ret;
+}
+
+int
+cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter_base = vport->adapter;
+ uint16_t logic_qid = cpfl_vport->nb_p2p_txq;
+ struct cpfl_txq_hairpin_info *hairpin_info;
+ struct idpf_hw *hw = &adapter_base->hw;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct idpf_tx_queue *txq, *cq;
+ const struct rte_memzone *mz;
+ uint32_t ring_size;
+ uint16_t peer_port, peer_q;
+
+ if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin queue.");
+ return -EINVAL;
+ }
+
+ if (conf->peer_count != 1) {
+ PMD_INIT_LOG(ERR, "Can't support Tx hairpin queue peer count %d", conf->peer_count);
+ return -EINVAL;
+ }
+
+ peer_port = conf->peers[0].port;
+ peer_q = conf->peers[0].queue;
+
+ if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
+ nb_desc > CPFL_MAX_RING_DESC ||
+ nb_desc < CPFL_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is invalid",
+ nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory if needed. */
+ if (dev->data->tx_queues[queue_idx]) {
+ cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the TX queue data structure. */
+ cpfl_txq = rte_zmalloc_socket("cpfl hairpin txq",
+ sizeof(struct cpfl_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!cpfl_txq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+ return -ENOMEM;
+ }
+
+ txq = &cpfl_txq->base;
+ hairpin_info = &cpfl_txq->hairpin_info;
+ /* Txq ring length should be 2 times of Tx completion queue size. */
+ txq->nb_tx_desc = nb_desc * 2;
+ txq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.tx_start_qid, logic_qid);
+ txq->port_id = dev->data->port_id;
+ hairpin_info->hairpin_q = true;
+ hairpin_info->peer_rxp = peer_port;
+ hairpin_info->peer_rxq_id = peer_q;
+
+ if (conf->manual_bind != 0)
+ hairpin_info->manual_bind = true;
+ else
+ hairpin_info->manual_bind = false;
+
+ /* Always Tx hairpin queue allocates Tx HW ring */
+ ring_size = RTE_ALIGN(txq->nb_tx_desc * CPFL_P2P_DESC_LEN,
+ CPFL_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_ring", logic_qid,
+ ring_size + CPFL_P2P_RING_BUF,
+ CPFL_RING_BASE_ALIGN,
+ dev->device->numa_node);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+ return -ENOMEM;
+ }
+
+ txq->tx_ring_phys_addr = mz->iova;
+ txq->desc_ring = mz->addr;
+ txq->mz = mz;
+
+ cpfl_tx_hairpin_descq_reset(txq);
+ txq->qtx_tail = hw->hw_addr +
+ cpfl_hw_qtail_get(cpfl_vport->p2p_q_chunks_info.tx_qtail_start,
+ logic_qid, cpfl_vport->p2p_q_chunks_info.tx_qtail_spacing);
+ txq->ops = &def_txq_ops;
+
+ if (cpfl_vport->p2p_tx_complq == NULL) {
+ cq = rte_zmalloc_socket("cpfl hairpin cq",
+ sizeof(struct idpf_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ dev->device->numa_node);
+ if (!cq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+ return -ENOMEM;
+ }
+
+ cq->nb_tx_desc = nb_desc;
+ cq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.tx_compl_start_qid, 0);
+ cq->port_id = dev->data->port_id;
+
+ /* Tx completion queue always allocates the HW ring */
+ ring_size = RTE_ALIGN(cq->nb_tx_desc * CPFL_P2P_DESC_LEN,
+ CPFL_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_compl_ring", logic_qid,
+ ring_size + CPFL_P2P_RING_BUF,
+ CPFL_RING_BASE_ALIGN,
+ dev->device->numa_node);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX completion queue");
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+ return -ENOMEM;
+ }
+ cq->tx_ring_phys_addr = mz->iova;
+ cq->compl_ring = mz->addr;
+ cq->mz = mz;
+
+ cpfl_tx_hairpin_complq_reset(cq);
+ cpfl_vport->p2p_tx_complq = cq;
+ }
+
+ txq->complq = cpfl_vport->p2p_tx_complq;
+
+ cpfl_vport->nb_p2p_txq++;
+ txq->q_set = true;
+ dev->data->tx_queues[queue_idx] = cpfl_txq;
+
+ return 0;
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
@@ -864,6 +1233,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ continue;
(void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 3a87a1f4b3..d844c9f057 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -13,6 +13,7 @@
#define CPFL_MIN_RING_DESC 32
#define CPFL_MAX_RING_DESC 4096
#define CPFL_DMA_MEM_ALIGN 4096
+#define CPFL_P2P_DESC_LEN 16
#define CPFL_MAX_HAIRPINQ_RX_2_TX 1
#define CPFL_MAX_HAIRPINQ_TX_2_RX 1
#define CPFL_MAX_HAIRPINQ_NB_DESC 1024
@@ -21,6 +22,10 @@
#define CPFL_P2P_NB_TX_COMPLQ 1
#define CPFL_P2P_NB_QUEUE_GRPS 1
#define CPFL_P2P_QUEUE_GRP_ID 1
+#define CPFL_P2P_NB_MBUF 4096
+#define CPFL_P2P_CACHE_SIZE 250
+#define CPFL_P2P_MBUF_SIZE 2048
+#define CPFL_P2P_RING_BUF 128
/* Base address of the HW descriptor ring should be 128B aligned. */
#define CPFL_RING_BASE_ALIGN 128
@@ -31,12 +36,28 @@
#define CPFL_SUPPORT_CHAIN_NUM 5
+struct cpfl_rxq_hairpin_info {
+ bool hairpin_q; /* if rx queue is a hairpin queue */
+ bool manual_bind; /* for cross vport */
+ uint16_t peer_txp;
+ uint16_t peer_txq_id;
+};
+
struct cpfl_rx_queue {
struct idpf_rx_queue base;
+ struct cpfl_rxq_hairpin_info hairpin_info;
+};
+
+struct cpfl_txq_hairpin_info {
+ bool hairpin_q; /* if tx queue is a hairpin queue */
+ bool manual_bind; /* for cross vport */
+ uint16_t peer_rxp;
+ uint16_t peer_rxq_id;
};
struct cpfl_tx_queue {
struct idpf_tx_queue base;
+ struct cpfl_txq_hairpin_info hairpin_info;
};
int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
@@ -57,4 +78,11 @@ void cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void cpfl_set_rx_function(struct rte_eth_dev *dev);
void cpfl_set_tx_function(struct rte_eth_dev *dev);
+uint16_t cpfl_hw_qid_get(uint16_t start_qid, uint16_t offset);
+uint64_t cpfl_hw_qtail_get(uint64_t tail_start, uint16_t offset, uint64_t tail_spacing);
+int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, const struct rte_eth_hairpin_conf *conf);
+int cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf);
#endif /* _CPFL_RXTX_H_ */
diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
index 5690b17911..d8e9191196 100644
--- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
+++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
@@ -85,6 +85,8 @@ cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)
cpfl_rxq = dev->data->rx_queues[i];
default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ continue;
splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base);
ret = splitq_ret && default_ret;
} else {
@@ -106,6 +108,8 @@ cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_tx_queues; i++) {
cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq->hairpin_info.hairpin_q)
+ continue;
ret = cpfl_tx_vec_queue_default(&cpfl_txq->base);
if (ret == CPFL_SCALAR_PATH)
return CPFL_SCALAR_PATH;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH 06/10] net/cpfl: support hairpin queue configuration
2023-04-21 6:50 [PATCH 00/10] add hairpin queue support beilei.xing
` (4 preceding siblings ...)
2023-04-21 6:50 ` [PATCH 05/10] net/cpfl: support hairpin queue setup and release beilei.xing
@ 2023-04-21 6:50 ` beilei.xing
2023-04-24 9:48 ` Liu, Mingxia
2023-04-21 6:50 ` [PATCH 07/10] net/cpfl: support hairpin queue start/stop beilei.xing
` (4 subsequent siblings)
10 siblings, 1 reply; 164+ messages in thread
From: beilei.xing @ 2023-04-21 6:50 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports Rx/Tx hairpin queue configuration.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
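For context, a minimal application-side sketch (not part of this patch) of how the
hairpin queues configured here are set up through the generic ethdev API. Only
rte_ethdev calls are used; the helper name, the data/hairpin queue counts and the
512-descriptor ring size are illustrative, and rte_eth_dev_configure() is assumed to
have been called with the extra hairpin queues included.
#include <errno.h>
#include <rte_ethdev.h>
static int
setup_hairpin_queues(uint16_t port_id, uint16_t nb_data_q, uint16_t nb_hairpin_q)
{
	struct rte_eth_hairpin_conf conf = { .peer_count = 1 };
	struct rte_eth_hairpin_cap cap;
	uint16_t i;
	int ret;
	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
	if (ret != 0 || cap.max_nb_queues == 0)
		return -ENOTSUP;
	for (i = 0; i < nb_hairpin_q; i++) {
		uint16_t qid = nb_data_q + i;
		/* single-port hairpin: Rx queue N is peered with Tx queue N */
		conf.peers[0].port = port_id;
		conf.peers[0].queue = qid;
		ret = rte_eth_rx_hairpin_queue_setup(port_id, qid, 512, &conf);
		if (ret != 0)
			return ret;
		ret = rte_eth_tx_hairpin_queue_setup(port_id, qid, 512, &conf);
		if (ret != 0)
			return ret;
	}
	/* rte_eth_dev_start() then reaches cpfl_start_queues(), which sends
	 * the VIRTCHNL2_OP_CONFIG_RX/TX_QUEUES messages added in this patch.
	 */
	return rte_eth_dev_start(port_id);
}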
drivers/common/idpf/idpf_common_virtchnl.c | 70 +++++++++++
drivers/common/idpf/idpf_common_virtchnl.h | 6 +
drivers/common/idpf/version.map | 2 +
drivers/net/cpfl/cpfl_ethdev.c | 136 ++++++++++++++++++++-
drivers/net/cpfl/cpfl_rxtx.c | 80 ++++++++++++
drivers/net/cpfl/cpfl_rxtx.h | 7 ++
6 files changed, 297 insertions(+), 4 deletions(-)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index 76a658bb26..50cd43a8dd 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -1050,6 +1050,41 @@ idpf_vc_rxq_config(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
return err;
}
+int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct virtchnl2_rxq_info *rxq_info,
+ uint16_t num_qs)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+ struct idpf_cmd_info args;
+ int size, err, i;
+
+ size = sizeof(*vc_rxqs) + (num_qs - 1) *
+ sizeof(struct virtchnl2_rxq_info);
+ vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+ if (vc_rxqs == NULL) {
+ DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_rxqs->vport_id = vport->vport_id;
+ vc_rxqs->num_qinfo = num_qs;
+ memcpy(vc_rxqs->qinfo, rxq_info, num_qs * sizeof(struct virtchnl2_rxq_info));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+ args.in_args = (uint8_t *)vc_rxqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ rte_free(vc_rxqs);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+ return err;
+}
+
int
idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
{
@@ -1121,6 +1156,41 @@ idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
return err;
}
+int
+idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
+ uint16_t num_qs)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+ struct idpf_cmd_info args;
+ int size, err;
+
+ size = sizeof(*vc_txqs) + (num_qs - 1) * sizeof(struct virtchnl2_txq_info);
+ vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+ if (vc_txqs == NULL) {
+ DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_txqs->vport_id = vport->vport_id;
+ vc_txqs->num_qinfo = num_qs;
+ memcpy(vc_txqs->qinfo, txq_info, num_qs * sizeof(struct virtchnl2_txq_info));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+ args.in_args = (uint8_t *)vc_txqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ rte_free(vc_txqs);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+ return err;
+}
+
int
idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
struct idpf_ctlq_msg *q_msg)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index bf1d014c8d..277235ba7d 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -65,6 +65,12 @@ __rte_internal
int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 *buff_count, struct idpf_dma_mem **buffs);
__rte_internal
+int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct virtchnl2_rxq_info *rxq_info,
+ uint16_t num_qs);
+__rte_internal
+int idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
+ uint16_t num_qs);
+__rte_internal
int idpf_vc_queue_grps_del(struct idpf_vport *vport,
uint16_t num_q_grps,
struct virtchnl2_queue_group_id *qg_ids);
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index aa67f7ee27..a339a4bf8e 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -59,8 +59,10 @@ INTERNAL {
idpf_vc_rss_lut_get;
idpf_vc_rss_lut_set;
idpf_vc_rxq_config;
+ idpf_vc_rxq_config_by_info;
idpf_vc_stats_query;
idpf_vc_txq_config;
+ idpf_vc_txq_config_by_info;
idpf_vc_vectors_alloc;
idpf_vc_vectors_dealloc;
idpf_vc_vport_create;
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index d3300f17cc..13edf2e706 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -737,32 +737,160 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
return idpf_vport_irq_map_config(vport, nb_rx_queues);
}
+/* Update hairpin_info for dev's tx hairpin queue */
+static int
+cpfl_txq_hairpin_info_update(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+ struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+ struct cpfl_txq_hairpin_info *hairpin_info;
+ struct cpfl_tx_queue *cpfl_txq;
+ int i;
+
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ hairpin_info = &cpfl_txq->hairpin_info;
+ if (hairpin_info->peer_rxp != rx_port) {
+ PMD_DRV_LOG(ERR, "port %d is not the peer port", rx_port);
+ return -EINVAL;
+ }
+ hairpin_info->peer_rxq_id =
+ cpfl_hw_qid_get(cpfl_rx_vport->p2p_q_chunks_info.rx_start_qid,
+ hairpin_info->peer_rxq_id - cpfl_rx_vport->nb_data_rxq);
+ }
+
+ return 0;
+}
+
+/* Bind Rx hairpin queue's memory zone to peer Tx hairpin queue's memory zone */
+static void
+cpfl_rxq_hairpin_mz_bind(struct rte_eth_dev *dev)
+{
+ struct cpfl_vport *cpfl_rx_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_rx_vport->base;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_hw *hw = &adapter->hw;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct rte_eth_dev *peer_dev;
+ const struct rte_memzone *mz;
+ uint16_t peer_tx_port;
+ uint16_t peer_tx_qid;
+ int i;
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ peer_tx_port = cpfl_rxq->hairpin_info.peer_txp;
+ peer_tx_qid = cpfl_rxq->hairpin_info.peer_txq_id;
+ peer_dev = &rte_eth_devices[peer_tx_port];
+ cpfl_txq = peer_dev->data->tx_queues[peer_tx_qid];
+
+ /* bind rx queue */
+ mz = cpfl_txq->base.mz;
+ cpfl_rxq->base.rx_ring_phys_addr = mz->iova;
+ cpfl_rxq->base.rx_ring = mz->addr;
+ cpfl_rxq->base.mz = mz;
+
+ /* bind rx buffer queue */
+ mz = cpfl_txq->base.complq->mz;
+ cpfl_rxq->base.bufq1->rx_ring_phys_addr = mz->iova;
+ cpfl_rxq->base.bufq1->rx_ring = mz->addr;
+ cpfl_rxq->base.bufq1->mz = mz;
+ cpfl_rxq->base.bufq1->qrx_tail = hw->hw_addr +
+ cpfl_hw_qtail_get(cpfl_rx_vport->p2p_q_chunks_info.rx_buf_qtail_start,
+ 0, cpfl_rx_vport->p2p_q_chunks_info.rx_buf_qtail_spacing);
+ }
+}
+
static int
cpfl_start_queues(struct rte_eth_dev *dev)
{
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_rx_queue *cpfl_rxq;
struct cpfl_tx_queue *cpfl_txq;
+ int tx_cmplq_flag = 0;
+ int rx_bufq_flag = 0;
+ int flag = 0;
int err = 0;
int i;
+ /* For normal data queues, configure, init and enable Txq.
+ * For non-cross vport hairpin queues, configure Txq.
+ */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
cpfl_txq = dev->data->tx_queues[i];
if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
continue;
- err = cpfl_tx_queue_start(dev, i);
+ if (!cpfl_txq->hairpin_info.hairpin_q) {
+ err = cpfl_tx_queue_start(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+ return err;
+ }
+ } else if (!cpfl_txq->hairpin_info.manual_bind) {
+ if (flag == 0) {
+ err = cpfl_txq_hairpin_info_update(dev,
+ cpfl_txq->hairpin_info.peer_rxp);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info");
+ return err;
+ }
+ flag = 1;
+ }
+ err = cpfl_hairpin_txq_config(vport, cpfl_txq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+ return err;
+ }
+ tx_cmplq_flag = 1;
+ }
+ }
+
+ /* For non-cross vport hairpin queues, configure Tx completion queue first.*/
+ if (tx_cmplq_flag == 1 && cpfl_vport->p2p_tx_complq != NULL) {
+ err = cpfl_hairpin_tx_complq_config(cpfl_vport);
if (err != 0) {
- PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+ PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
return err;
}
}
+ /* For normal data queues, configure, init and enable Rxq.
+ * For non-cross vport hairpin queues, configure Rxq, and then init Rxq.
+ */
+ cpfl_rxq_hairpin_mz_bind(dev);
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
continue;
- err = cpfl_rx_queue_start(dev, i);
+ if (!cpfl_rxq->hairpin_info.hairpin_q) {
+ err = cpfl_rx_queue_start(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
+ return err;
+ }
+ } else if (!cpfl_rxq->hairpin_info.manual_bind) {
+ err = cpfl_hairpin_rxq_config(vport, cpfl_rxq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
+ return err;
+ }
+ err = cpfl_rx_queue_init(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+ return err;
+ }
+ rx_bufq_flag = 1;
+ }
+ }
+
+ /* For non-cross vport hairpin queues, configure Rx buffer queue.*/
+ if (rx_bufq_flag == 1 && cpfl_vport->p2p_rx_bufq != NULL) {
+ err = cpfl_hairpin_rx_bufq_config(cpfl_vport);
if (err != 0) {
- PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
+ PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
return err;
}
}
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 64ed331a6d..040beb5bac 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -930,6 +930,86 @@ cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return 0;
}
+int
+cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport)
+{
+ struct idpf_rx_queue *rx_bufq = cpfl_vport->p2p_rx_bufq;
+ struct virtchnl2_rxq_info rxq_info[1] = {0};
+
+ rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ rxq_info[0].queue_id = rx_bufq->queue_id;
+ rxq_info[0].ring_len = rx_bufq->nb_rx_desc;
+ rxq_info[0].dma_ring_addr = rx_bufq->rx_ring_phys_addr;
+ rxq_info[0].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info[0].rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
+ rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ rxq_info[0].data_buffer_size = rx_bufq->rx_buf_len;
+ rxq_info[0].buffer_notif_stride = CPFL_RX_BUF_STRIDE;
+
+ return idpf_vc_rxq_config_by_info(&cpfl_vport->base, rxq_info, 1);
+}
+
+int
+cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq)
+{
+ struct virtchnl2_rxq_info rxq_info[1] = {0};
+ struct idpf_rx_queue *rxq = &cpfl_rxq->base;
+
+ rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX;
+ rxq_info[0].queue_id = rxq->queue_id;
+ rxq_info[0].ring_len = rxq->nb_rx_desc;
+ rxq_info[0].dma_ring_addr = rxq->rx_ring_phys_addr;
+ rxq_info[0].rx_bufq1_id = rxq->bufq1->queue_id;
+ rxq_info[0].max_pkt_size = vport->max_pkt_len;
+ rxq_info[0].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info[0].qflags |= VIRTCHNL2_RX_DESC_SIZE_16BYTE;
+
+ rxq_info[0].data_buffer_size = rxq->rx_buf_len;
+ rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ rxq_info[0].rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
+
+ PMD_DRV_LOG(NOTICE, "hairpin: vport %u, Rxq id 0x%x",
+ vport->vport_id, rxq_info[0].queue_id);
+
+ return idpf_vc_rxq_config_by_info(vport, rxq_info, 1);
+}
+
+int
+cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport)
+{
+ struct idpf_tx_queue *tx_complq = cpfl_vport->p2p_tx_complq;
+ struct virtchnl2_txq_info txq_info[1] = {0};
+
+ txq_info[0].dma_ring_addr = tx_complq->tx_ring_phys_addr;
+ txq_info[0].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ txq_info[0].queue_id = tx_complq->queue_id;
+ txq_info[0].ring_len = tx_complq->nb_tx_desc;
+ txq_info[0].peer_rx_queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+ txq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info[0].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+
+ return idpf_vc_txq_config_by_info(&cpfl_vport->base, txq_info, 1);
+}
+
+int
+cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq)
+{
+ struct idpf_tx_queue *txq = &cpfl_txq->base;
+ struct virtchnl2_txq_info txq_info[1] = {0};
+
+ txq_info[0].dma_ring_addr = txq->tx_ring_phys_addr;
+ txq_info[0].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ txq_info[0].queue_id = txq->queue_id;
+ txq_info[0].ring_len = txq->nb_tx_desc;
+ txq_info[0].tx_compl_queue_id = txq->complq->queue_id;
+ txq_info[0].relative_queue_id = txq->queue_id;
+ txq_info[0].peer_rx_queue_id = cpfl_txq->hairpin_info.peer_rxq_id;
+ txq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info[0].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+
+ return idpf_vc_txq_config_by_info(vport, txq_info, 1);
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index d844c9f057..b01ce5edf9 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -30,12 +30,15 @@
#define CPFL_RING_BASE_ALIGN 128
#define CPFL_DEFAULT_RX_FREE_THRESH 32
+#define CPFL_RXBUF_LOW_WATERMARK 64
#define CPFL_DEFAULT_TX_RS_THRESH 32
#define CPFL_DEFAULT_TX_FREE_THRESH 32
#define CPFL_SUPPORT_CHAIN_NUM 5
+#define CPFL_RX_BUF_STRIDE 64
+
struct cpfl_rxq_hairpin_info {
bool hairpin_q; /* if rx queue is a hairpin queue */
bool manual_bind; /* for cross vport */
@@ -85,4 +88,8 @@ int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
int cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc,
const struct rte_eth_hairpin_conf *conf);
+int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport);
+int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq);
+int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
+int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
#endif /* _CPFL_RXTX_H_ */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH 07/10] net/cpfl: support hairpin queue start/stop
2023-04-21 6:50 [PATCH 00/10] add hairpin queue support beilei.xing
` (5 preceding siblings ...)
2023-04-21 6:50 ` [PATCH 06/10] net/cpfl: support hairpin queue configuration beilei.xing
@ 2023-04-21 6:50 ` beilei.xing
2023-04-21 6:50 ` [PATCH 08/10] net/cpfl: enable write back based on ITR expire beilei.xing
` (3 subsequent siblings)
10 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-04-21 6:50 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports Rx/Tx hairpin queue start/stop.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
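As a usage note (not part of the patch): with manual_bind left at 0, the whole
start/stop sequence added here is driven from the regular device start/stop path,
so an application does nothing hairpin-specific at runtime. A minimal sketch,
assuming the queues were set up as in the note under patch 06; the helper name is
illustrative.
#include <rte_ethdev.h>
static int
run_hairpin_loopback(uint16_t port_id)
{
	int ret;
	/* cpfl_start_queues() configures and enables the hairpin Rx/Tx queues,
	 * the Tx completion queue and the Rx buffer queue here.
	 */
	ret = rte_eth_dev_start(port_id);
	if (ret != 0)
		return ret;
	/* Traffic hitting the hairpin Rx queues is looped back to the peer Tx
	 * hairpin queues by hardware; no software dequeue is involved.
	 */
	/* cpfl_stop_queues() disables the hairpin queues again. */
	return rte_eth_dev_stop(port_id);
}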
drivers/common/idpf/idpf_common_virtchnl.c | 2 +-
drivers/common/idpf/idpf_common_virtchnl.h | 3 +
drivers/common/idpf/version.map | 1 +
drivers/net/cpfl/cpfl_ethdev.c | 39 ++++++
drivers/net/cpfl/cpfl_rxtx.c | 153 ++++++++++++++++++---
drivers/net/cpfl/cpfl_rxtx.h | 14 ++
6 files changed, 193 insertions(+), 19 deletions(-)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index 50cd43a8dd..20a5bc085d 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -733,7 +733,7 @@ idpf_vc_vectors_dealloc(struct idpf_vport *vport)
return err;
}
-static int
+int
idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
uint32_t type, bool on)
{
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index 277235ba7d..18db6cd8c8 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -71,6 +71,9 @@ __rte_internal
int idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
uint16_t num_qs);
__rte_internal
+int idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
+ uint32_t type, bool on);
+__rte_internal
int idpf_vc_queue_grps_del(struct idpf_vport *vport,
uint16_t num_q_grps,
struct virtchnl2_queue_group_id *qg_ids);
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index a339a4bf8e..0e87dba2ae 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -45,6 +45,7 @@ INTERNAL {
idpf_vc_cmd_execute;
idpf_vc_ctlq_post_rx_buffs;
idpf_vc_ctlq_recv;
+ idpf_vc_ena_dis_one_queue;
idpf_vc_irq_map_unmap_config;
idpf_vc_one_msg_read;
idpf_vc_ptype_info_query;
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 13edf2e706..f154c83f27 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -895,6 +895,45 @@ cpfl_start_queues(struct rte_eth_dev *dev)
}
}
+ /* For non-cross vport hairpin queues, enable Tx queue and Rx queue,
+ * then enable Tx completion queue and Rx buffer queue.
+ */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq->hairpin_info.hairpin_q && !cpfl_txq->hairpin_info.manual_bind) {
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ i - cpfl_vport->nb_data_txq,
+ false, true);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on",
+ i);
+ else
+ cpfl_txq->base.q_started = true;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q && !cpfl_rxq->hairpin_info.manual_bind) {
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ i - cpfl_vport->nb_data_rxq,
+ true, true);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on",
+ i);
+ else
+ cpfl_rxq->base.q_started = true;
+ }
+ }
+
+ if (tx_cmplq_flag == 1 && rx_bufq_flag == 1) {
+ err = cpfl_switch_hairpin_bufq_complq(cpfl_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq and Rx bufq");
+ return err;
+ }
+ }
+
return err;
}
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 040beb5bac..ed2d100c35 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -1010,6 +1010,83 @@ cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq
return idpf_vc_txq_config_by_info(vport, txq_info, 1);
}
+int
+cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ queue_id = cpfl_vport->p2p_tx_complq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+ if (err)
+ return err;
+
+ type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+ return err;
+}
+
+int
+cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t logic_qid,
+ bool rx, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = rx ? VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;
+
+ if (type == VIRTCHNL2_QUEUE_TYPE_RX)
+ queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.rx_start_qid, logic_qid);
+ else
+ queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.tx_start_qid, logic_qid);
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+ if (err)
+ return err;
+
+ return err;
+}
+
+static int
+cpfl_alloc_split_p2p_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+ volatile struct virtchnl2_p2p_rx_buf_desc *rxd;
+ struct rte_mbuf *mbuf = NULL;
+ uint64_t dma_addr;
+ uint16_t i;
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ mbuf = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!mbuf)) {
+ PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+ return -ENOMEM;
+ }
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+ rxd = &((volatile struct virtchnl2_p2p_rx_buf_desc *)(rxq->rx_ring))[i];
+ rxd->reserve0 = 0;
+ rxd->pkt_addr = dma_addr;
+
+ rxq->sw_ring[i] = mbuf;
+ }
+
+ rxq->nb_rx_hold = 0;
+ /* The value written in the RX buffer queue tail register must be a multiple of 8. */
+ rxq->rx_tail = rxq->nb_rx_desc - CPFL_HAIRPIN_Q_TAIL_AUX_VALUE;
+
+ return 0;
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
@@ -1063,22 +1140,31 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
IDPF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
} else {
/* Split queue */
- err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
- return err;
- }
- err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
- return err;
+ if (cpfl_rxq->hairpin_info.hairpin_q) {
+ err = cpfl_alloc_split_p2p_rxq_mbufs(rxq->bufq1);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate p2p RX buffer queue mbuf");
+ return err;
+ }
+ } else {
+ err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+ return err;
+ }
+ err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+ return err;
+ }
}
rte_wmb();
/* Init the RX tail register. */
IDPF_PCI_REG_WRITE(rxq->bufq1->qrx_tail, rxq->bufq1->rx_tail);
- IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
+ if (rxq->bufq2)
+ IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
}
return err;
@@ -1185,7 +1271,12 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return -EINVAL;
cpfl_rxq = dev->data->rx_queues[rx_queue_id];
- err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ rx_queue_id - cpfl_vport->nb_data_rxq,
+ true, false);
+ else
+ err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
rx_queue_id);
@@ -1199,10 +1290,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
idpf_qc_single_rx_queue_reset(rxq);
} else {
rxq->bufq1->ops->release_mbufs(rxq->bufq1);
- rxq->bufq2->ops->release_mbufs(rxq->bufq2);
- idpf_qc_split_rx_queue_reset(rxq);
+ if (rxq->bufq2)
+ rxq->bufq2->ops->release_mbufs(rxq->bufq2);
+ if (cpfl_rxq->hairpin_info.hairpin_q) {
+ cpfl_rx_hairpin_descq_reset(rxq);
+ cpfl_rx_hairpin_bufq_reset(rxq->bufq1);
+ } else {
+ idpf_qc_split_rx_queue_reset(rxq);
+ }
}
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ if (!cpfl_rxq->hairpin_info.hairpin_q)
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1221,7 +1319,12 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
cpfl_txq = dev->data->tx_queues[tx_queue_id];
- err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
+ if (cpfl_txq->hairpin_info.hairpin_q)
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ tx_queue_id - cpfl_vport->nb_data_txq,
+ false, false);
+ else
+ err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
tx_queue_id);
@@ -1234,10 +1337,17 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
idpf_qc_single_tx_queue_reset(txq);
} else {
- idpf_qc_split_tx_descq_reset(txq);
- idpf_qc_split_tx_complq_reset(txq->complq);
+ if (cpfl_txq->hairpin_info.hairpin_q) {
+ cpfl_tx_hairpin_descq_reset(txq);
+ cpfl_tx_hairpin_complq_reset(txq->complq);
+ } else {
+ idpf_qc_split_tx_descq_reset(txq);
+ idpf_qc_split_tx_complq_reset(txq->complq);
+ }
}
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ if (!cpfl_txq->hairpin_info.hairpin_q)
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1257,10 +1367,17 @@ cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
void
cpfl_stop_queues(struct rte_eth_dev *dev)
{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
struct cpfl_rx_queue *cpfl_rxq;
struct cpfl_tx_queue *cpfl_txq;
int i;
+ if (cpfl_vport->p2p_rx_bufq != NULL) {
+ if (cpfl_switch_hairpin_bufq_complq(cpfl_vport, false) != 0)
+ PMD_DRV_LOG(ERR, "Failed to stop hairpin Tx complq and Rx bufq");
+ }
+
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
if (cpfl_rxq == NULL)
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index b01ce5edf9..87603e161e 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -39,6 +39,17 @@
#define CPFL_RX_BUF_STRIDE 64
+/* The value written in the RX buffer queue tail register,
+ * and in WritePTR field in the TX completion queue context,
+ * must be a multiple of 8.
+ */
+#define CPFL_HAIRPIN_Q_TAIL_AUX_VALUE 8
+
+struct virtchnl2_p2p_rx_buf_desc {
+ __le64 reserve0;
+ __le64 pkt_addr; /* Packet buffer address */
+};
+
struct cpfl_rxq_hairpin_info {
bool hairpin_q; /* if rx queue is a hairpin queue */
bool manual_bind; /* for cross vport */
@@ -92,4 +103,7 @@ int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq);
int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
+int cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t qid,
+ bool rx, bool on);
#endif /* _CPFL_RXTX_H_ */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH 08/10] net/cpfl: enable write back based on ITR expire
2023-04-21 6:50 [PATCH 00/10] add hairpin queue support beilei.xing
` (6 preceding siblings ...)
2023-04-21 6:50 ` [PATCH 07/10] net/cpfl: support hairpin queue start/stop beilei.xing
@ 2023-04-21 6:50 ` beilei.xing
2023-04-21 6:50 ` [PATCH 09/10] net/cpfl: support peer ports get beilei.xing
` (2 subsequent siblings)
10 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-04-21 6:50 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch enables write back based on ITR expire
(WB_ON_ITR) for hairpin queues.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_device.c | 75 ++++++++++++++++++++++++
drivers/common/idpf/idpf_common_device.h | 4 ++
drivers/common/idpf/version.map | 1 +
drivers/net/cpfl/cpfl_ethdev.c | 13 +++-
4 files changed, 92 insertions(+), 1 deletion(-)
diff --git a/drivers/common/idpf/idpf_common_device.c b/drivers/common/idpf/idpf_common_device.c
index 3b58bdd41e..86a4a54f9b 100644
--- a/drivers/common/idpf/idpf_common_device.c
+++ b/drivers/common/idpf/idpf_common_device.c
@@ -559,6 +559,81 @@ idpf_vport_irq_map_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
return ret;
}
+int
+idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport, uint32_t *qids, uint16_t nb_rx_queues)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_queue_vector *qv_map;
+ struct idpf_hw *hw = &adapter->hw;
+ uint32_t dynctl_val, itrn_val;
+ uint32_t dynctl_reg_start;
+ uint32_t itrn_reg_start;
+ uint16_t i;
+ int ret;
+
+ qv_map = rte_zmalloc("qv_map",
+ nb_rx_queues *
+ sizeof(struct virtchnl2_queue_vector), 0);
+ if (qv_map == NULL) {
+ DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
+ nb_rx_queues);
+ ret = -ENOMEM;
+ goto qv_map_alloc_err;
+ }
+
+ /* Rx interrupt disabled, Map interrupt only for writeback */
+
+ /* The capability flags adapter->caps.other_caps should be
+ * compared with bit VIRTCHNL2_CAP_WB_ON_ITR here. The if
+ * condition should be updated when the FW can return the
+ * correct flag bits.
+ */
+ dynctl_reg_start =
+ vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
+ itrn_reg_start =
+ vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
+ dynctl_val = IDPF_READ_REG(hw, dynctl_reg_start);
+ DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x", dynctl_val);
+ itrn_val = IDPF_READ_REG(hw, itrn_reg_start);
+ DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
+ /* Force write-backs by setting WB_ON_ITR bit in DYN_CTL
+ * register. WB_ON_ITR and INTENA are mutually exclusive
+ * bits. Setting WB_ON_ITR bits means TX and RX Descs
+ * are written back based on ITR expiration irrespective
+ * of INTENA setting.
+ */
+ /* TBD: need to tune INTERVAL value for better performance. */
+ itrn_val = (itrn_val == 0) ? IDPF_DFLT_INTERVAL : itrn_val;
+ dynctl_val = VIRTCHNL2_ITR_IDX_0 <<
+ PF_GLINT_DYN_CTL_ITR_INDX_S |
+ PF_GLINT_DYN_CTL_WB_ON_ITR_M |
+ itrn_val << PF_GLINT_DYN_CTL_INTERVAL_S;
+ IDPF_WRITE_REG(hw, dynctl_reg_start, dynctl_val);
+
+ for (i = 0; i < nb_rx_queues; i++) {
+ /* map all queues to the same vector */
+ qv_map[i].queue_id = qids[i];
+ qv_map[i].vector_id =
+ vport->recv_vectors->vchunks.vchunks->start_vector_id;
+ }
+ vport->qv_map = qv_map;
+
+ ret = idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, true);
+ if (ret != 0) {
+ DRV_LOG(ERR, "config interrupt mapping failed");
+ goto config_irq_map_err;
+ }
+
+ return 0;
+
+config_irq_map_err:
+ rte_free(vport->qv_map);
+ vport->qv_map = NULL;
+
+qv_map_alloc_err:
+ return ret;
+}
+
int
idpf_vport_irq_unmap_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
{
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 7cf2355bc9..1aa9d9516f 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -212,5 +212,9 @@ int idpf_vport_info_init(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_info);
__rte_internal
void idpf_vport_stats_update(struct virtchnl2_vport_stats *oes, struct virtchnl2_vport_stats *nes);
+__rte_internal
+int idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport,
+ uint32_t *qids,
+ uint16_t nb_rx_queues);
#endif /* _IDPF_COMMON_DEVICE_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 0e87dba2ae..e3a7ef0daa 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -74,6 +74,7 @@ INTERNAL {
idpf_vport_info_init;
idpf_vport_init;
idpf_vport_irq_map_config;
+ idpf_vport_irq_map_config_by_qids;
idpf_vport_irq_unmap_config;
idpf_vport_rss_config;
idpf_vport_stats_update;
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index f154c83f27..008686bfd4 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -730,11 +730,22 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
static int
cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
{
+ uint32_t qids[CPFL_MAX_P2P_NB_QUEUES + IDPF_DEFAULT_RXQ_NUM] = {0};
struct cpfl_vport *cpfl_vport = dev->data->dev_private;
struct idpf_vport *vport = &cpfl_vport->base;
uint16_t nb_rx_queues = dev->data->nb_rx_queues;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i;
- return idpf_vport_irq_map_config(vport, nb_rx_queues);
+ for (i = 0; i < nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ qids[i] = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.rx_start_qid,
+ (i - cpfl_vport->nb_data_rxq));
+ else
+ qids[i] = cpfl_hw_qid_get(vport->chunks_info.rx_start_qid, i);
+ }
+ return idpf_vport_irq_map_config_by_qids(vport, qids, nb_rx_queues);
}
/* Update hairpin_info for dev's tx hairpin queue */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH 09/10] net/cpfl: support peer ports get
2023-04-21 6:50 [PATCH 00/10] add hairpin queue support beilei.xing
` (7 preceding siblings ...)
2023-04-21 6:50 ` [PATCH 08/10] net/cpfl: enable write back based on ITR expire beilei.xing
@ 2023-04-21 6:50 ` beilei.xing
2023-04-21 6:50 ` [PATCH 10/10] net/cpfl: support hairpin bind/unbind beilei.xing
2023-05-19 5:10 ` [PATCH v2 00/10] add hairpin queue support beilei.xing
10 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-04-21 6:50 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports getting hairpin peer ports.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
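A small usage sketch (not part of the patch) of the new op through the generic
wrapper rte_eth_hairpin_get_peer_ports(); the direction argument follows the ethdev
convention (non-zero: peers of the Tx hairpin queues) and the return value is the
number of peer ports or a negative error. The helper name and port id are
illustrative.
#include <stdio.h>
#include <rte_ethdev.h>
static void
show_hairpin_peers(uint16_t port_id)
{
	uint16_t peers[RTE_MAX_ETHPORTS];
	int n, i;
	n = rte_eth_hairpin_get_peer_ports(port_id, peers, RTE_DIM(peers), 1);
	if (n < 0) {
		printf("port %u: getting Tx hairpin peers failed: %d\n", port_id, n);
		return;
	}
	for (i = 0; i < n; i++)
		printf("port %u hairpin Tx queue #%d peers Rx port %u\n",
		       port_id, i, peers[i]);
}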
drivers/net/cpfl/cpfl_ethdev.c | 34 ++++++++++++++++++++++++++++++++++
1 file changed, 34 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 008686bfd4..52c4ab601f 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1074,6 +1074,39 @@ cpfl_dev_close(struct rte_eth_dev *dev)
return 0;
}
+static int
+cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
+ __rte_unused size_t len, uint32_t tx)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_tx_queue *txq;
+ struct idpf_rx_queue *rxq;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i, j;
+
+ if (tx > 0) {
+ for (i = cpfl_vport->nb_data_txq, j = 0; i < dev->data->nb_tx_queues; i++, j++) {
+ txq = dev->data->tx_queues[i];
+ if (txq == NULL)
+ return -EINVAL;
+ cpfl_txq = (struct cpfl_tx_queue *)txq;
+ peer_ports[j] = cpfl_txq->hairpin_info.peer_rxp;
+ }
+ } else if (tx == 0) {
+ for (i = cpfl_vport->nb_data_rxq, j = 0; i < dev->data->nb_rx_queues; i++, j++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq == NULL)
+ return -EINVAL;
+ cpfl_rxq = (struct cpfl_rx_queue *)rxq;
+ peer_ports[j] = cpfl_rxq->hairpin_info.peer_txp;
+ }
+ }
+
+ return j;
+}
+
static const struct eth_dev_ops cpfl_eth_dev_ops = {
.dev_configure = cpfl_dev_configure,
.dev_close = cpfl_dev_close,
@@ -1103,6 +1136,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.hairpin_cap_get = cpfl_hairpin_cap_get,
.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
+ .hairpin_get_peer_ports = cpfl_hairpin_get_peer_ports,
};
static int
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH 10/10] net/cpfl: support hairpin bind/unbind
2023-04-21 6:50 [PATCH 00/10] add hairpin queue support beilei.xing
` (8 preceding siblings ...)
2023-04-21 6:50 ` [PATCH 09/10] net/cpfl: support peer ports get beilei.xing
@ 2023-04-21 6:50 ` beilei.xing
2023-05-19 5:10 ` [PATCH v2 00/10] add hairpin queue support beilei.xing
10 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-04-21 6:50 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports hairpin_bind/unbind ops.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
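For the cross-port case these ops target, a hedged application-side sketch (not part
of the patch): both ports are assumed to have set up their hairpin queues with
manual_bind = 1 and the peer fields pointing at the other port before being started.
Only generic ethdev calls are used; the helper name is illustrative.
#include <rte_ethdev.h>
static int
manual_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
{
	int ret;
	ret = rte_eth_dev_start(tx_port);
	if (ret != 0)
		return ret;
	ret = rte_eth_dev_start(rx_port);
	if (ret != 0)
		return ret;
	/* reaches cpfl_hairpin_bind() on the Tx port's PMD */
	ret = rte_eth_hairpin_bind(tx_port, rx_port);
	if (ret != 0)
		return ret;
	return 0;
}
/* Teardown is the mirror image: rte_eth_hairpin_unbind(tx_port, rx_port)
 * before stopping the ports, which reaches cpfl_hairpin_unbind().
 */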
drivers/net/cpfl/cpfl_ethdev.c | 137 +++++++++++++++++++++++++++++++++
drivers/net/cpfl/cpfl_rxtx.c | 28 +++++++
drivers/net/cpfl/cpfl_rxtx.h | 2 +
3 files changed, 167 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 52c4ab601f..ddafc2f9e5 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1107,6 +1107,141 @@ cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
return j;
}
+static int
+cpfl_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct idpf_vport *tx_vport = &cpfl_tx_vport->base;
+ struct cpfl_vport *cpfl_rx_vport;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct rte_eth_dev *peer_dev;
+ struct idpf_vport *rx_vport;
+ int err = 0;
+ int i;
+
+ err = cpfl_txq_hairpin_info_update(dev, rx_port);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info.");
+ return err;
+ }
+
+ /* configure hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ err = cpfl_hairpin_txq_config(tx_vport, cpfl_txq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+ return err;
+ }
+ }
+
+ err = cpfl_hairpin_tx_complq_config(cpfl_tx_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
+ return err;
+ }
+
+ peer_dev = &rte_eth_devices[rx_port];
+ cpfl_rx_vport = (struct cpfl_vport *)peer_dev->data->dev_private;
+ rx_vport = &cpfl_rx_vport->base;
+ cpfl_rxq_hairpin_mz_bind(peer_dev);
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ err = cpfl_hairpin_rxq_config(rx_vport, cpfl_rxq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
+ return err;
+ }
+ err = cpfl_rx_queue_init(peer_dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+ return err;
+ }
+ }
+
+ err = cpfl_hairpin_rx_bufq_config(cpfl_rx_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
+ return err;
+ }
+
+ /* enable hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+ i - cpfl_tx_vport->nb_data_txq,
+ false, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on",
+ i);
+ return err;
+ }
+ cpfl_txq->base.q_started = true;
+ }
+
+ err = cpfl_switch_hairpin_complq(cpfl_tx_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq");
+ return err;
+ }
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+ i - cpfl_rx_vport->nb_data_rxq,
+ true, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on",
+ i);
+ }
+ cpfl_rxq->base.q_started = true;
+ }
+
+ err = cpfl_switch_hairpin_bufq(cpfl_rx_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Rx buffer queue");
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+cpfl_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+ struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i;
+
+ /* disable hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+ i - cpfl_tx_vport->nb_data_txq,
+ false, false);
+ cpfl_txq->base.q_started = false;
+ }
+
+ cpfl_switch_hairpin_complq(cpfl_tx_vport, false);
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+ i - cpfl_rx_vport->nb_data_rxq,
+ true, false);
+ cpfl_rxq->base.q_started = false;
+ }
+
+ cpfl_switch_hairpin_bufq(cpfl_rx_vport, false);
+
+ return 0;
+}
+
static const struct eth_dev_ops cpfl_eth_dev_ops = {
.dev_configure = cpfl_dev_configure,
.dev_close = cpfl_dev_close,
@@ -1137,6 +1272,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
.hairpin_get_peer_ports = cpfl_hairpin_get_peer_ports,
+ .hairpin_bind = cpfl_hairpin_bind,
+ .hairpin_unbind = cpfl_hairpin_unbind,
};
static int
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index ed2d100c35..e025bd014f 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -1030,6 +1030,34 @@ cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on)
return err;
}
+int
+cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ queue_id = cpfl_vport->p2p_tx_complq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+ return err;
+}
+
+int
+cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+ return err;
+}
+
int
cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t logic_qid,
bool rx, bool on)
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 87603e161e..60308e16b2 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -104,6 +104,8 @@ int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl
int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
int cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on);
int cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t qid,
bool rx, bool on);
#endif /* _CPFL_RXTX_H_ */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH 03/10] common/idpf: support queue groups add/delete
2023-04-21 6:50 ` [PATCH 03/10] common/idpf: support queue groups add/delete beilei.xing
@ 2023-04-24 8:48 ` Liu, Mingxia
2023-04-24 8:49 ` Liu, Mingxia
1 sibling, 0 replies; 164+ messages in thread
From: Liu, Mingxia @ 2023-04-24 8:48 UTC (permalink / raw)
To: Xing, Beilei, Wu, Jingjing; +Cc: dev
> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Friday, April 21, 2023 2:51 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Subject: [PATCH 03/10] common/idpf: support queue groups add/delete
>
> From: Beilei Xing <beilei.xing@intel.com>
>
> This patch adds queue group add/delete virtual channel support.
>
> Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/common/idpf/idpf_common_virtchnl.c | 66
> ++++++++++++++++++++++
> drivers/common/idpf/idpf_common_virtchnl.h | 9 +++
> drivers/common/idpf/version.map | 2 +
> 3 files changed, 77 insertions(+)
>
> diff --git a/drivers/common/idpf/idpf_common_virtchnl.c
> b/drivers/common/idpf/idpf_common_virtchnl.c
> index a4e129062e..76a658bb26 100644
> --- a/drivers/common/idpf/idpf_common_virtchnl.c
> +++ b/drivers/common/idpf/idpf_common_virtchnl.c
> @@ -359,6 +359,72 @@ idpf_vc_vport_destroy(struct idpf_vport *vport)
> return err;
> }
>
> +int
> +idpf_vc_queue_grps_add(struct idpf_vport *vport,
> + struct virtchnl2_add_queue_groups
> *ptp_queue_grps_info,
> + uint8_t *ptp_queue_grps_out)
[Liu, Mingxia] Better to unify the abbreviation of "port to port": in this patch ptp is used, in the next patch p2p is used.
> +{
> + struct idpf_adapter *adapter = vport->adapter;
> + struct idpf_cmd_info args;
> + int size, qg_info_size;
> + int err = -1;
> +
> + size = sizeof(*ptp_queue_grps_info) +
> + (ptp_queue_grps_info->qg_info.num_queue_groups - 1) *
> + sizeof(struct virtchnl2_queue_group_info);
> +
> + memset(&args, 0, sizeof(args));
> + args.ops = VIRTCHNL2_OP_ADD_QUEUE_GROUPS;
> + args.in_args = (uint8_t *)ptp_queue_grps_info;
> + args.in_args_size = size;
> + args.out_buffer = adapter->mbx_resp;
> + args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> +
> + err = idpf_vc_cmd_execute(adapter, &args);
> + if (err != 0) {
> + DRV_LOG(ERR,
> + "Failed to execute command of
> VIRTCHNL2_OP_ADD_QUEUE_GROUPS");
> + return err;
> + }
> +
> + rte_memcpy(ptp_queue_grps_out, args.out_buffer,
> IDPF_DFLT_MBX_BUF_SIZE);
> + return 0;
> +}
> +
> +int idpf_vc_queue_grps_del(struct idpf_vport *vport,
> + uint16_t num_q_grps,
> + struct virtchnl2_queue_group_id *qg_ids) {
> + struct idpf_adapter *adapter = vport->adapter;
> + struct virtchnl2_delete_queue_groups *vc_del_q_grps;
> + struct idpf_cmd_info args;
> + int size;
> + int err;
> +
> + size = sizeof(*vc_del_q_grps) +
> + (num_q_grps - 1) * sizeof(struct virtchnl2_queue_group_id);
> + vc_del_q_grps = rte_zmalloc("vc_del_q_grps", size, 0);
> +
> + vc_del_q_grps->vport_id = vport->vport_id;
> + vc_del_q_grps->num_queue_groups = num_q_grps;
> + memcpy(vc_del_q_grps->qg_ids, qg_ids,
> + num_q_grps * sizeof(struct virtchnl2_queue_group_id));
> +
> + memset(&args, 0, sizeof(args));
> + args.ops = VIRTCHNL2_OP_DEL_QUEUE_GROUPS;
> + args.in_args = (uint8_t *)vc_del_q_grps;
> + args.in_args_size = size;
> + args.out_buffer = adapter->mbx_resp;
> + args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> +
> + err = idpf_vc_cmd_execute(adapter, &args);
> + if (err != 0)
> + DRV_LOG(ERR, "Failed to execute command of
> +VIRTCHNL2_OP_DEL_QUEUE_GROUPS");
> +
> + rte_free(vc_del_q_grps);
> + return err;
> +}
> +
> int
> idpf_vc_rss_key_set(struct idpf_vport *vport) { diff --git
> a/drivers/common/idpf/idpf_common_virtchnl.h
> b/drivers/common/idpf/idpf_common_virtchnl.h
> index d479d93c8e..bf1d014c8d 100644
> --- a/drivers/common/idpf/idpf_common_virtchnl.h
> +++ b/drivers/common/idpf/idpf_common_virtchnl.h
> @@ -64,4 +64,13 @@ int idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16
> *num_q_msg, __rte_internal int idpf_vc_ctlq_post_rx_buffs(struct
> idpf_hw *hw, struct idpf_ctlq_info *cq,
> u16 *buff_count, struct idpf_dma_mem **buffs);
> +__rte_internal
> +int idpf_vc_queue_grps_del(struct idpf_vport *vport,
> + uint16_t num_q_grps,
> + struct virtchnl2_queue_group_id *qg_ids);
> __rte_internal int
> +idpf_vc_queue_grps_add(struct idpf_vport *vport,
> + struct virtchnl2_add_queue_groups
> *ptp_queue_grps_info,
> + uint8_t *ptp_queue_grps_out);
> #endif /* _IDPF_COMMON_VIRTCHNL_H_ */
> diff --git a/drivers/common/idpf/version.map
> b/drivers/common/idpf/version.map index 7076759024..aa67f7ee27
> 100644
> --- a/drivers/common/idpf/version.map
> +++ b/drivers/common/idpf/version.map
> @@ -48,6 +48,8 @@ INTERNAL {
> idpf_vc_irq_map_unmap_config;
> idpf_vc_one_msg_read;
> idpf_vc_ptype_info_query;
> + idpf_vc_queue_grps_add;
> + idpf_vc_queue_grps_del;
> idpf_vc_queue_switch;
> idpf_vc_queues_ena_dis;
> idpf_vc_rss_hash_get;
> --
> 2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH 03/10] common/idpf: support queue groups add/delete
2023-04-21 6:50 ` [PATCH 03/10] common/idpf: support queue groups add/delete beilei.xing
2023-04-24 8:48 ` Liu, Mingxia
@ 2023-04-24 8:49 ` Liu, Mingxia
2023-05-19 5:36 ` Xing, Beilei
1 sibling, 1 reply; 164+ messages in thread
From: Liu, Mingxia @ 2023-04-24 8:49 UTC (permalink / raw)
To: Xing, Beilei, Wu, Jingjing; +Cc: dev
> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Friday, April 21, 2023 2:51 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Subject: [PATCH 03/10] common/idpf: support queue groups add/delete
>
> From: Beilei Xing <beilei.xing@intel.com>
>
> This patch adds queue group add/delete virtual channel support.
>
> Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/common/idpf/idpf_common_virtchnl.c | 66
> ++++++++++++++++++++++
> drivers/common/idpf/idpf_common_virtchnl.h | 9 +++
> drivers/common/idpf/version.map | 2 +
> 3 files changed, 77 insertions(+)
>
> diff --git a/drivers/common/idpf/idpf_common_virtchnl.c
> b/drivers/common/idpf/idpf_common_virtchnl.c
> index a4e129062e..76a658bb26 100644
> --- a/drivers/common/idpf/idpf_common_virtchnl.c
> +++ b/drivers/common/idpf/idpf_common_virtchnl.c
> @@ -359,6 +359,72 @@ idpf_vc_vport_destroy(struct idpf_vport *vport)
> return err;
> }
>
> +int
> +idpf_vc_queue_grps_add(struct idpf_vport *vport,
> + struct virtchnl2_add_queue_groups
> *ptp_queue_grps_info,
> + uint8_t *ptp_queue_grps_out)
[Liu, Mingxia] Better to unify the abbreviation of "port to port": in this patch ptp is used, in the next patch p2p is used.
> +{
> + struct idpf_adapter *adapter = vport->adapter;
> + struct idpf_cmd_info args;
> + int size, qg_info_size;
> + int err = -1;
> +
> + size = sizeof(*ptp_queue_grps_info) +
> + (ptp_queue_grps_info->qg_info.num_queue_groups - 1) *
> + sizeof(struct virtchnl2_queue_group_info);
> +
> + memset(&args, 0, sizeof(args));
> + args.ops = VIRTCHNL2_OP_ADD_QUEUE_GROUPS;
> + args.in_args = (uint8_t *)ptp_queue_grps_info;
> + args.in_args_size = size;
> + args.out_buffer = adapter->mbx_resp;
> + args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> +
> + err = idpf_vc_cmd_execute(adapter, &args);
> + if (err != 0) {
> + DRV_LOG(ERR,
> + "Failed to execute command of
> VIRTCHNL2_OP_ADD_QUEUE_GROUPS");
> + return err;
> + }
> +
> + rte_memcpy(ptp_queue_grps_out, args.out_buffer,
> IDPF_DFLT_MBX_BUF_SIZE);
> + return 0;
> +}
> +
> +int idpf_vc_queue_grps_del(struct idpf_vport *vport,
> + uint16_t num_q_grps,
> + struct virtchnl2_queue_group_id *qg_ids) {
> + struct idpf_adapter *adapter = vport->adapter;
> + struct virtchnl2_delete_queue_groups *vc_del_q_grps;
> + struct idpf_cmd_info args;
> + int size;
> + int err;
> +
> + size = sizeof(*vc_del_q_grps) +
> + (num_q_grps - 1) * sizeof(struct virtchnl2_queue_group_id);
> + vc_del_q_grps = rte_zmalloc("vc_del_q_grps", size, 0);
> +
> + vc_del_q_grps->vport_id = vport->vport_id;
> + vc_del_q_grps->num_queue_groups = num_q_grps;
> + memcpy(vc_del_q_grps->qg_ids, qg_ids,
> + num_q_grps * sizeof(struct virtchnl2_queue_group_id));
> +
> + memset(&args, 0, sizeof(args));
> + args.ops = VIRTCHNL2_OP_DEL_QUEUE_GROUPS;
> + args.in_args = (uint8_t *)vc_del_q_grps;
> + args.in_args_size = size;
> + args.out_buffer = adapter->mbx_resp;
> + args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> +
> + err = idpf_vc_cmd_execute(adapter, &args);
> + if (err != 0)
> + DRV_LOG(ERR, "Failed to execute command of
> +VIRTCHNL2_OP_DEL_QUEUE_GROUPS");
> +
> + rte_free(vc_del_q_grps);
> + return err;
> +}
> +
> int
> idpf_vc_rss_key_set(struct idpf_vport *vport) { diff --git
> a/drivers/common/idpf/idpf_common_virtchnl.h
> b/drivers/common/idpf/idpf_common_virtchnl.h
> index d479d93c8e..bf1d014c8d 100644
> --- a/drivers/common/idpf/idpf_common_virtchnl.h
> +++ b/drivers/common/idpf/idpf_common_virtchnl.h
> @@ -64,4 +64,13 @@ int idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16
> *num_q_msg, __rte_internal int idpf_vc_ctlq_post_rx_buffs(struct
> idpf_hw *hw, struct idpf_ctlq_info *cq,
> u16 *buff_count, struct idpf_dma_mem **buffs);
> +__rte_internal
> +int idpf_vc_queue_grps_del(struct idpf_vport *vport,
> + uint16_t num_q_grps,
> + struct virtchnl2_queue_group_id *qg_ids);
> __rte_internal int
> +idpf_vc_queue_grps_add(struct idpf_vport *vport,
> + struct virtchnl2_add_queue_groups
> *ptp_queue_grps_info,
> + uint8_t *ptp_queue_grps_out);
> #endif /* _IDPF_COMMON_VIRTCHNL_H_ */
> diff --git a/drivers/common/idpf/version.map
> b/drivers/common/idpf/version.map index 7076759024..aa67f7ee27
> 100644
> --- a/drivers/common/idpf/version.map
> +++ b/drivers/common/idpf/version.map
> @@ -48,6 +48,8 @@ INTERNAL {
> idpf_vc_irq_map_unmap_config;
> idpf_vc_one_msg_read;
> idpf_vc_ptype_info_query;
> + idpf_vc_queue_grps_add;
> + idpf_vc_queue_grps_del;
> idpf_vc_queue_switch;
> idpf_vc_queues_ena_dis;
> idpf_vc_rss_hash_get;
> --
> 2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH 04/10] net/cpfl: add haipin queue group during vpotr init
2023-04-21 6:50 ` [PATCH 04/10] net/cpfl: add haipin queue group during vpotr init beilei.xing
@ 2023-04-24 8:55 ` Liu, Mingxia
2023-05-19 5:36 ` Xing, Beilei
0 siblings, 1 reply; 164+ messages in thread
From: Liu, Mingxia @ 2023-04-24 8:55 UTC (permalink / raw)
To: Xing, Beilei, Wu, Jingjing; +Cc: dev
> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Friday, April 21, 2023 2:51 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Subject: [PATCH 04/10] net/cpfl: add haipin queue group during vpotr init
[Liu, Mingxia] vpotr, spelling error?
>
> From: Beilei Xing <beilei.xing@intel.com>
>
> This patch adds haipin queue group during vpotr init.
>
> Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/net/cpfl/cpfl_ethdev.c | 125
> +++++++++++++++++++++++++++++++++ drivers/net/cpfl/cpfl_ethdev.h |
> 17 +++++
> drivers/net/cpfl/cpfl_rxtx.h | 4 ++
> 3 files changed, 146 insertions(+)
>
> diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
> index 114fc18f5f..ad5ddebd3a 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.c
> +++ b/drivers/net/cpfl/cpfl_ethdev.c
> @@ -856,6 +856,20 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
> return 0;
> }
>
> +static int
> +cpfl_p2p_queue_grps_del(struct idpf_vport *vport) {
> + struct virtchnl2_queue_group_id
> qg_ids[CPFL_P2P_NB_QUEUE_GRPS] = {0};
> + int ret = 0;
> +
> + qg_ids[0].queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
> + qg_ids[0].queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
> + ret = idpf_vc_queue_grps_del(vport, CPFL_P2P_NB_QUEUE_GRPS,
> qg_ids);
> + if (ret)
> + PMD_DRV_LOG(ERR, "Failed to delete p2p queue groups");
> + return ret;
> +}
> +
> static int
> cpfl_dev_close(struct rte_eth_dev *dev) { @@ -864,6 +878,9 @@
> cpfl_dev_close(struct rte_eth_dev *dev)
> struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport-
> >adapter);
>
> cpfl_dev_stop(dev);
> +
> + cpfl_p2p_queue_grps_del(vport);
> +
> idpf_vport_deinit(vport);
>
> adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id); @@ -
> 1350,6 +1367,96 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext
> *adapter)
> return vport_idx;
> }
>
> +static int
> +cpfl_p2p_q_grps_add(struct idpf_vport *vport,
> + struct virtchnl2_add_queue_groups
> *p2p_queue_grps_info,
> + uint8_t *p2p_q_vc_out_info)
> +{
> + int ret;
> +
> + p2p_queue_grps_info->vport_id = vport->vport_id;
> + p2p_queue_grps_info->qg_info.num_queue_groups =
> CPFL_P2P_NB_QUEUE_GRPS;
> + p2p_queue_grps_info->qg_info.groups[0].num_rx_q =
> CPFL_MAX_P2P_NB_QUEUES;
> + p2p_queue_grps_info->qg_info.groups[0].num_rx_bufq =
> CPFL_P2P_NB_RX_BUFQ;
> + p2p_queue_grps_info->qg_info.groups[0].num_tx_q =
> CPFL_MAX_P2P_NB_QUEUES;
> + p2p_queue_grps_info->qg_info.groups[0].num_tx_complq =
> CPFL_P2P_NB_TX_COMPLQ;
> + p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_id =
> CPFL_P2P_QUEUE_GRP_ID;
> + p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_type
> = VIRTCHNL2_QUEUE_GROUP_P2P;
> + p2p_queue_grps_info-
> >qg_info.groups[0].rx_q_grp_info.rss_lut_size = 0;
> + p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.tx_tc = 0;
> + p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.priority =
> 0;
> + p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.is_sp = 0;
> + p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.pir_weight
> = 0;
> +
> + ret = idpf_vc_queue_grps_add(vport, p2p_queue_grps_info,
> p2p_q_vc_out_info);
> + if (ret != 0) {
> + PMD_DRV_LOG(ERR, "Failed to add p2p queue groups.");
> + return ret;
> + }
> +
> + return ret;
> +}
> +
> +static int
> +cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
> + struct virtchnl2_add_queue_groups
> *p2p_q_vc_out_info) {
> + struct p2p_queue_chunks_info *p2p_q_chunks_info =
> &cpfl_vport->p2p_q_chunks_info;
> + struct virtchnl2_queue_reg_chunks *vc_chunks_out;
> + int i, type;
> +
> + if (p2p_q_vc_out_info-
> >qg_info.groups[0].qg_id.queue_group_type !=
> + VIRTCHNL2_QUEUE_GROUP_P2P) {
> + PMD_DRV_LOG(ERR, "Add queue group response
> mismatch.");
> + return -EINVAL;
> + }
> +
> + vc_chunks_out = &p2p_q_vc_out_info->qg_info.groups[0].chunks;
> +
> + for (i = 0; i < vc_chunks_out->num_chunks; i++) {
> + type = vc_chunks_out->chunks[i].type;
> + switch (type) {
> + case VIRTCHNL2_QUEUE_TYPE_TX:
> + p2p_q_chunks_info->tx_start_qid =
> + vc_chunks_out->chunks[i].start_queue_id;
> + p2p_q_chunks_info->tx_qtail_start =
> + vc_chunks_out->chunks[i].qtail_reg_start;
> + p2p_q_chunks_info->tx_qtail_spacing =
> + vc_chunks_out->chunks[i].qtail_reg_spacing;
> + break;
> + case VIRTCHNL2_QUEUE_TYPE_RX:
> + p2p_q_chunks_info->rx_start_qid =
> + vc_chunks_out->chunks[i].start_queue_id;
> + p2p_q_chunks_info->rx_qtail_start =
> + vc_chunks_out->chunks[i].qtail_reg_start;
> + p2p_q_chunks_info->rx_qtail_spacing =
> + vc_chunks_out->chunks[i].qtail_reg_spacing;
> + break;
> + case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
> + p2p_q_chunks_info->tx_compl_start_qid =
> + vc_chunks_out->chunks[i].start_queue_id;
> + p2p_q_chunks_info->tx_compl_qtail_start =
> + vc_chunks_out->chunks[i].qtail_reg_start;
> + p2p_q_chunks_info->tx_compl_qtail_spacing =
> + vc_chunks_out->chunks[i].qtail_reg_spacing;
> + break;
> + case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
> + p2p_q_chunks_info->rx_buf_start_qid =
> + vc_chunks_out->chunks[i].start_queue_id;
> + p2p_q_chunks_info->rx_buf_qtail_start =
> + vc_chunks_out->chunks[i].qtail_reg_start;
> + p2p_q_chunks_info->rx_buf_qtail_spacing =
> + vc_chunks_out->chunks[i].qtail_reg_spacing;
> + break;
> + default:
> + PMD_DRV_LOG(ERR, "Unsupported queue type");
> + break;
> + }
> + }
> +
> + return 0;
> +}
> +
> static int
> cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params) { @@ -
> 1359,6 +1466,8 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void
> *init_params)
> struct cpfl_adapter_ext *adapter = param->adapter;
> /* for sending create vport virtchnl msg prepare */
> struct virtchnl2_create_vport create_vport_info;
> + struct virtchnl2_add_queue_groups p2p_queue_grps_info;
> + uint8_t p2p_q_vc_out_info[IDPF_DFLT_MBX_BUF_SIZE] = {0};
> int ret = 0;
>
> dev->dev_ops = &cpfl_eth_dev_ops;
> @@ -1380,6 +1489,19 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev,
> void *init_params)
> goto err;
> }
>
> + memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
> + ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info,
> p2p_q_vc_out_info);
> + if (ret != 0) {
> + PMD_INIT_LOG(ERR, "Failed to add p2p queue group.");
> + goto err_q_grps_add;
> + }
> + ret = cpfl_p2p_queue_info_init(cpfl_vport,
> + (struct virtchnl2_add_queue_groups
> *)p2p_q_vc_out_info);
> + if (ret != 0) {
> + PMD_INIT_LOG(ERR, "Failed to init p2p queue info.");
> + goto err_p2p_qinfo_init;
> + }
> +
> adapter->vports[param->idx] = cpfl_vport;
> adapter->cur_vports |= RTE_BIT32(param->devarg_id);
> adapter->cur_vport_nb++;
> @@ -1397,6 +1519,9 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev,
> void *init_params)
> return 0;
>
> err_mac_addrs:
> +err_p2p_qinfo_init:
> + cpfl_p2p_queue_grps_del(vport);
> +err_q_grps_add:
> adapter->vports[param->idx] = NULL; /* reset */
> idpf_vport_deinit(vport);
> adapter->cur_vports &= ~RTE_BIT32(param->devarg_id); diff --git
> a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h index
> 81fe9ac4c3..5e2e7a1bfb 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.h
> +++ b/drivers/net/cpfl/cpfl_ethdev.h
> @@ -69,8 +69,25 @@ struct cpfl_devargs {
> uint16_t req_vport_nb;
> };
>
> +struct p2p_queue_chunks_info {
> + uint32_t tx_start_qid;
> + uint32_t rx_start_qid;
> + uint32_t tx_compl_start_qid;
> + uint32_t rx_buf_start_qid;
> +
> + uint64_t tx_qtail_start;
> + uint32_t tx_qtail_spacing;
> + uint64_t rx_qtail_start;
> + uint32_t rx_qtail_spacing;
> + uint64_t tx_compl_qtail_start;
> + uint32_t tx_compl_qtail_spacing;
> + uint64_t rx_buf_qtail_start;
> + uint32_t rx_buf_qtail_spacing;
> +};
> +
> struct cpfl_vport {
> struct idpf_vport base;
> + struct p2p_queue_chunks_info p2p_q_chunks_info;
> };
>
> struct cpfl_adapter_ext {
> diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h index
> b2b3537d10..3a87a1f4b3 100644
> --- a/drivers/net/cpfl/cpfl_rxtx.h
> +++ b/drivers/net/cpfl/cpfl_rxtx.h
> @@ -17,6 +17,10 @@
> #define CPFL_MAX_HAIRPINQ_TX_2_RX 1
> #define CPFL_MAX_HAIRPINQ_NB_DESC 1024
> #define CPFL_MAX_P2P_NB_QUEUES 16
> +#define CPFL_P2P_NB_RX_BUFQ 1
> +#define CPFL_P2P_NB_TX_COMPLQ 1
> +#define CPFL_P2P_NB_QUEUE_GRPS 1
> +#define CPFL_P2P_QUEUE_GRP_ID 1
> /* Base address of the HW descriptor ring should be 128B aligned. */
> #define CPFL_RING_BASE_ALIGN 128
>
> --
> 2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH 06/10] net/cpfl: support hairpin queue configuration
2023-04-21 6:50 ` [PATCH 06/10] net/cpfl: support hairpin queue configuration beilei.xing
@ 2023-04-24 9:48 ` Liu, Mingxia
2023-05-19 5:43 ` Xing, Beilei
0 siblings, 1 reply; 164+ messages in thread
From: Liu, Mingxia @ 2023-04-24 9:48 UTC (permalink / raw)
To: Xing, Beilei, Wu, Jingjing; +Cc: dev, Wang, Xiao W
> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Friday, April 21, 2023 2:51 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>
> Subject: [PATCH 06/10] net/cpfl: support hairpin queue configuration
>
> From: Beilei Xing <beilei.xing@intel.com>
>
> This patch supports Rx/Tx hairpin queue configuration.
>
> Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/common/idpf/idpf_common_virtchnl.c | 70 +++++++++++
> drivers/common/idpf/idpf_common_virtchnl.h | 6 +
> drivers/common/idpf/version.map | 2 +
> drivers/net/cpfl/cpfl_ethdev.c | 136 ++++++++++++++++++++-
> drivers/net/cpfl/cpfl_rxtx.c | 80 ++++++++++++
> drivers/net/cpfl/cpfl_rxtx.h | 7 ++
> 6 files changed, 297 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/common/idpf/idpf_common_virtchnl.c
> b/drivers/common/idpf/idpf_common_virtchnl.c
> index 76a658bb26..50cd43a8dd 100644
> --- a/drivers/common/idpf/idpf_common_virtchnl.c
> +++ b/drivers/common/idpf/idpf_common_virtchnl.c
> @@ -1050,6 +1050,41 @@ idpf_vc_rxq_config(struct idpf_vport *vport, struct
> idpf_rx_queue *rxq)
> return err;
> }
>
> +int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct
> virtchnl2_rxq_info *rxq_info,
> + uint16_t num_qs)
> +{
> + struct idpf_adapter *adapter = vport->adapter;
> + struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
> + struct idpf_cmd_info args;
> + int size, err, i;
> +
> + size = sizeof(*vc_rxqs) + (num_qs - 1) *
> + sizeof(struct virtchnl2_rxq_info);
> + vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
> + if (vc_rxqs == NULL) {
> + DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
> + err = -ENOMEM;
> + return err;
> + }
> + vc_rxqs->vport_id = vport->vport_id;
> + vc_rxqs->num_qinfo = num_qs;
> + memcpy(vc_rxqs->qinfo, rxq_info, num_qs * sizeof(struct
> +virtchnl2_rxq_info));
> +
> + memset(&args, 0, sizeof(args));
> + args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
> + args.in_args = (uint8_t *)vc_rxqs;
> + args.in_args_size = size;
> + args.out_buffer = adapter->mbx_resp;
> + args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> +
> + err = idpf_vc_cmd_execute(adapter, &args);
> + rte_free(vc_rxqs);
> + if (err != 0)
> + DRV_LOG(ERR, "Failed to execute command of
> +VIRTCHNL2_OP_CONFIG_RX_QUEUES");
> +
> + return err;
> +}
> +
> int
> idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq) { @@ -
> 1121,6 +1156,41 @@ idpf_vc_txq_config(struct idpf_vport *vport, struct
> idpf_tx_queue *txq)
> return err;
> }
>
> +int
> +idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info
> *txq_info,
> + uint16_t num_qs)
> +{
> + struct idpf_adapter *adapter = vport->adapter;
> + struct virtchnl2_config_tx_queues *vc_txqs = NULL;
> + struct idpf_cmd_info args;
> + int size, err;
> +
> + size = sizeof(*vc_txqs) + (num_qs - 1) * sizeof(struct virtchnl2_txq_info);
> + vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
> + if (vc_txqs == NULL) {
> + DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
> + err = -ENOMEM;
> + return err;
> + }
> + vc_txqs->vport_id = vport->vport_id;
> + vc_txqs->num_qinfo = num_qs;
> + memcpy(vc_txqs->qinfo, txq_info, num_qs * sizeof(struct
> +virtchnl2_txq_info));
> +
> + memset(&args, 0, sizeof(args));
> + args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
> + args.in_args = (uint8_t *)vc_txqs;
> + args.in_args_size = size;
> + args.out_buffer = adapter->mbx_resp;
> + args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> +
> + err = idpf_vc_cmd_execute(adapter, &args);
> + rte_free(vc_txqs);
> + if (err != 0)
> + DRV_LOG(ERR, "Failed to execute command of
> +VIRTCHNL2_OP_CONFIG_TX_QUEUES");
> +
> + return err;
> +}
> +
> int
> idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
> struct idpf_ctlq_msg *q_msg)
> diff --git a/drivers/common/idpf/idpf_common_virtchnl.h
> b/drivers/common/idpf/idpf_common_virtchnl.h
> index bf1d014c8d..277235ba7d 100644
> --- a/drivers/common/idpf/idpf_common_virtchnl.h
> +++ b/drivers/common/idpf/idpf_common_virtchnl.h
> @@ -65,6 +65,12 @@ __rte_internal
> int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
> u16 *buff_count, struct idpf_dma_mem **buffs);
> __rte_internal
> +int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct
> virtchnl2_rxq_info *rxq_info,
> + uint16_t num_qs);
> +__rte_internal
> +int idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct
> virtchnl2_txq_info *txq_info,
> + uint16_t num_qs);
> +__rte_internal
> int idpf_vc_queue_grps_del(struct idpf_vport *vport,
> uint16_t num_q_grps,
> struct virtchnl2_queue_group_id *qg_ids); diff --git
> a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map index
> aa67f7ee27..a339a4bf8e 100644
> --- a/drivers/common/idpf/version.map
> +++ b/drivers/common/idpf/version.map
> @@ -59,8 +59,10 @@ INTERNAL {
> idpf_vc_rss_lut_get;
> idpf_vc_rss_lut_set;
> idpf_vc_rxq_config;
> + idpf_vc_rxq_config_by_info;
> idpf_vc_stats_query;
> idpf_vc_txq_config;
> + idpf_vc_txq_config_by_info;
> idpf_vc_vectors_alloc;
> idpf_vc_vectors_dealloc;
> idpf_vc_vport_create;
> diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c index
> d3300f17cc..13edf2e706 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.c
> +++ b/drivers/net/cpfl/cpfl_ethdev.c
> @@ -737,32 +737,160 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
> return idpf_vport_irq_map_config(vport, nb_rx_queues); }
>
> +/* Update hairpin_info for dev's tx hairpin queue */ static int
> +cpfl_txq_hairpin_info_update(struct rte_eth_dev *dev, uint16_t rx_port)
> +{
> + struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
> + struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
> + struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
> + struct cpfl_txq_hairpin_info *hairpin_info;
> + struct cpfl_tx_queue *cpfl_txq;
> + int i;
> +
> + for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
> + cpfl_txq = dev->data->tx_queues[i];
> + hairpin_info = &cpfl_txq->hairpin_info;
> + if (hairpin_info->peer_rxp != rx_port) {
> + PMD_DRV_LOG(ERR, "port %d is not the peer port",
> rx_port);
> + return -EINVAL;
> + }
> + hairpin_info->peer_rxq_id =
> + cpfl_hw_qid_get(cpfl_rx_vport-
> >p2p_q_chunks_info.rx_start_qid,
> + hairpin_info->peer_rxq_id -
> cpfl_rx_vport->nb_data_rxq);
> + }
> +
> + return 0;
> +}
> +
> +/* Bind Rx hairpin queue's memory zone to peer Tx hairpin queue's
> +memory zone */ static void cpfl_rxq_hairpin_mz_bind(struct rte_eth_dev
> +*dev) {
> + struct cpfl_vport *cpfl_rx_vport = dev->data->dev_private;
> + struct idpf_vport *vport = &cpfl_rx_vport->base;
> + struct idpf_adapter *adapter = vport->adapter;
> + struct idpf_hw *hw = &adapter->hw;
> + struct cpfl_rx_queue *cpfl_rxq;
> + struct cpfl_tx_queue *cpfl_txq;
> + struct rte_eth_dev *peer_dev;
> + const struct rte_memzone *mz;
> + uint16_t peer_tx_port;
> + uint16_t peer_tx_qid;
> + int i;
> +
> + for (i = cpfl_rx_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++) {
> + cpfl_rxq = dev->data->rx_queues[i];
> + peer_tx_port = cpfl_rxq->hairpin_info.peer_txp;
> + peer_tx_qid = cpfl_rxq->hairpin_info.peer_txq_id;
> + peer_dev = &rte_eth_devices[peer_tx_port];
> + cpfl_txq = peer_dev->data->tx_queues[peer_tx_qid];
> +
> + /* bind rx queue */
> + mz = cpfl_txq->base.mz;
> + cpfl_rxq->base.rx_ring_phys_addr = mz->iova;
> + cpfl_rxq->base.rx_ring = mz->addr;
> + cpfl_rxq->base.mz = mz;
> +
> + /* bind rx buffer queue */
> + mz = cpfl_txq->base.complq->mz;
> + cpfl_rxq->base.bufq1->rx_ring_phys_addr = mz->iova;
> + cpfl_rxq->base.bufq1->rx_ring = mz->addr;
> + cpfl_rxq->base.bufq1->mz = mz;
> + cpfl_rxq->base.bufq1->qrx_tail = hw->hw_addr +
> + cpfl_hw_qtail_get(cpfl_rx_vport-
> >p2p_q_chunks_info.rx_buf_qtail_start,
> + 0, cpfl_rx_vport-
> >p2p_q_chunks_info.rx_buf_qtail_spacing);
> + }
> +}
> +
> static int
> cpfl_start_queues(struct rte_eth_dev *dev) {
> + struct cpfl_vport *cpfl_vport = dev->data->dev_private;
> + struct idpf_vport *vport = &cpfl_vport->base;
> struct cpfl_rx_queue *cpfl_rxq;
> struct cpfl_tx_queue *cpfl_txq;
> + int tx_cmplq_flag = 0;
> + int rx_bufq_flag = 0;
> + int flag = 0;
> int err = 0;
> int i;
>
> + /* For normal data queues, configure, init and enale Txq.
> + * For non-cross vport hairpin queues, configure Txq.
> + */
> for (i = 0; i < dev->data->nb_tx_queues; i++) {
> cpfl_txq = dev->data->tx_queues[i];
> if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
> continue;
> - err = cpfl_tx_queue_start(dev, i);
> + if (!cpfl_txq->hairpin_info.hairpin_q) {
> + err = cpfl_tx_queue_start(dev, i);
> + if (err != 0) {
> + PMD_DRV_LOG(ERR, "Fail to start Tx
> queue %u", i);
> + return err;
> + }
> + } else if (!cpfl_txq->hairpin_info.manual_bind) {
> + if (flag == 0) {
> + err = cpfl_txq_hairpin_info_update(dev,
> + cpfl_txq-
> >hairpin_info.peer_rxp);
> + if (err != 0) {
> + PMD_DRV_LOG(ERR, "Fail to update Tx
> hairpin queue info");
> + return err;
> + }
> + flag = 1;
[Liu, Mingxia] The variable flag is not being used, can it be removed?
> + }
> + err = cpfl_hairpin_txq_config(vport, cpfl_txq);
> + if (err != 0) {
> + PMD_DRV_LOG(ERR, "Fail to configure hairpin
> Tx queue %u", i);
> + return err;
> + }
> + tx_cmplq_flag = 1;
> + }
> + }
> +
> + /* For non-cross vport hairpin queues, configure Tx completion queue
> first.*/
> + if (tx_cmplq_flag == 1 && cpfl_vport->p2p_tx_complq != NULL) {
> + err = cpfl_hairpin_tx_complq_config(cpfl_vport);
> if (err != 0) {
> - PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
> + PMD_DRV_LOG(ERR, "Fail to config Tx completion
> queue");
> return err;
> }
> }
>
[Liu, Mingxia] Better to move this code next to
+ err = cpfl_hairpin_txq_config(vport, cpfl_txq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+ return err;
+ }
When cpfl_rxq->hairpin_info.hairpin_q is true, then cpfl_vport->p2p_tx_complq is not null, right?
And could tx_cmplq_flag then be removed?
> + /* For normal data queues, configure, init and enale Rxq.
> + * For non-cross vport hairpin queues, configure Rxq, and then init Rxq.
> + */
> + cpfl_rxq_hairpin_mz_bind(dev);
> for (i = 0; i < dev->data->nb_rx_queues; i++) {
> cpfl_rxq = dev->data->rx_queues[i];
> if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
> continue;
> - err = cpfl_rx_queue_start(dev, i);
> + if (!cpfl_rxq->hairpin_info.hairpin_q) {
> + err = cpfl_rx_queue_start(dev, i);
> + if (err != 0) {
> + PMD_DRV_LOG(ERR, "Fail to start Rx
> queue %u", i);
> + return err;
> + }
> + } else if (!cpfl_rxq->hairpin_info.manual_bind) {
> + err = cpfl_hairpin_rxq_config(vport, cpfl_rxq);
> + if (err != 0) {
> + PMD_DRV_LOG(ERR, "Fail to configure hairpin
> Rx queue %u", i);
> + return err;
> + }
> + err = cpfl_rx_queue_init(dev, i);
> + if (err != 0) {
> + PMD_DRV_LOG(ERR, "Fail to init hairpin Rx
> queue %u", i);
> + return err;
> + }
> + rx_bufq_flag = 1;
> + }
> + }
> +
> + /* For non-cross vport hairpin queues, configure Rx buffer queue.*/
> + if (rx_bufq_flag == 1 && cpfl_vport->p2p_rx_bufq != NULL) {
> + err = cpfl_hairpin_rx_bufq_config(cpfl_vport);
> if (err != 0) {
> - PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
> + PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
> return err;
> }
> }
[Liu, Mingxia] Similar to above.
> diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c index
> 64ed331a6d..040beb5bac 100644
> --- a/drivers/net/cpfl/cpfl_rxtx.c
> +++ b/drivers/net/cpfl/cpfl_rxtx.c
> @@ -930,6 +930,86 @@ cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev,
> uint16_t queue_idx,
> return 0;
> }
>
> +int
> +cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport) {
> + struct idpf_rx_queue *rx_bufq = cpfl_vport->p2p_rx_bufq;
> + struct virtchnl2_rxq_info rxq_info[1] = {0};
> +
> + rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
> + rxq_info[0].queue_id = rx_bufq->queue_id;
> + rxq_info[0].ring_len = rx_bufq->nb_rx_desc;
> + rxq_info[0].dma_ring_addr = rx_bufq->rx_ring_phys_addr;
> + rxq_info[0].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
> + rxq_info[0].rx_buffer_low_watermark =
> CPFL_RXBUF_LOW_WATERMARK;
> + rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
> + rxq_info[0].data_buffer_size = rx_bufq->rx_buf_len;
> + rxq_info[0].buffer_notif_stride = CPFL_RX_BUF_STRIDE;
> +
> + return idpf_vc_rxq_config_by_info(&cpfl_vport->base, rxq_info, 1); }
> +
> +int
> +cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue
> +*cpfl_rxq) {
> + struct virtchnl2_rxq_info rxq_info[1] = {0};
> + struct idpf_rx_queue *rxq = &cpfl_rxq->base;
> +
> + rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX;
> + rxq_info[0].queue_id = rxq->queue_id;
> + rxq_info[0].ring_len = rxq->nb_rx_desc;
> + rxq_info[0].dma_ring_addr = rxq->rx_ring_phys_addr;
> + rxq_info[0].rx_bufq1_id = rxq->bufq1->queue_id;
> + rxq_info[0].max_pkt_size = vport->max_pkt_len;
> + rxq_info[0].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
> + rxq_info[0].qflags |= VIRTCHNL2_RX_DESC_SIZE_16BYTE;
> +
> + rxq_info[0].data_buffer_size = rxq->rx_buf_len;
> + rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
> + rxq_info[0].rx_buffer_low_watermark =
> CPFL_RXBUF_LOW_WATERMARK;
> +
> + PMD_DRV_LOG(NOTICE, "hairpin: vport %u, Rxq id 0x%x",
> + vport->vport_id, rxq_info[0].queue_id);
> +
> + return idpf_vc_rxq_config_by_info(vport, rxq_info, 1); }
> +
> +int
> +cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport) {
> + struct idpf_tx_queue *tx_complq = cpfl_vport->p2p_tx_complq;
> + struct virtchnl2_txq_info txq_info[1] = {0};
> +
> + txq_info[0].dma_ring_addr = tx_complq->tx_ring_phys_addr;
> + txq_info[0].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
> + txq_info[0].queue_id = tx_complq->queue_id;
> + txq_info[0].ring_len = tx_complq->nb_tx_desc;
> + txq_info[0].peer_rx_queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
> + txq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
> + txq_info[0].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
> +
> + return idpf_vc_txq_config_by_info(&cpfl_vport->base, txq_info, 1); }
> +
> +int
> +cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue
> +*cpfl_txq) {
> + struct idpf_tx_queue *txq = &cpfl_txq->base;
> + struct virtchnl2_txq_info txq_info[1] = {0};
> +
> + txq_info[0].dma_ring_addr = txq->tx_ring_phys_addr;
> + txq_info[0].type = VIRTCHNL2_QUEUE_TYPE_TX;
> + txq_info[0].queue_id = txq->queue_id;
> + txq_info[0].ring_len = txq->nb_tx_desc;
> + txq_info[0].tx_compl_queue_id = txq->complq->queue_id;
> + txq_info[0].relative_queue_id = txq->queue_id;
> + txq_info[0].peer_rx_queue_id = cpfl_txq->hairpin_info.peer_rxq_id;
> + txq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
> + txq_info[0].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
> +
> + return idpf_vc_txq_config_by_info(vport, txq_info, 1); }
> +
> int
> cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id) { diff --git
> a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h index
> d844c9f057..b01ce5edf9 100644
> --- a/drivers/net/cpfl/cpfl_rxtx.h
> +++ b/drivers/net/cpfl/cpfl_rxtx.h
> @@ -30,12 +30,15 @@
> #define CPFL_RING_BASE_ALIGN 128
>
> #define CPFL_DEFAULT_RX_FREE_THRESH 32
> +#define CPFL_RXBUF_LOW_WATERMARK 64
>
> #define CPFL_DEFAULT_TX_RS_THRESH 32
> #define CPFL_DEFAULT_TX_FREE_THRESH 32
>
> #define CPFL_SUPPORT_CHAIN_NUM 5
>
> +#define CPFL_RX_BUF_STRIDE 64
> +
> struct cpfl_rxq_hairpin_info {
> bool hairpin_q; /* if rx queue is a hairpin queue */
> bool manual_bind; /* for cross vport */
> @@ -85,4 +88,8 @@ int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev,
> uint16_t queue_idx, int cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev,
> uint16_t queue_idx,
> uint16_t nb_desc,
> const struct rte_eth_hairpin_conf *conf);
> +int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport); int
> +cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue
> +*cpfl_txq); int cpfl_hairpin_rx_bufq_config(struct cpfl_vport
> +*cpfl_vport); int cpfl_hairpin_rxq_config(struct idpf_vport *vport,
> +struct cpfl_rx_queue *cpfl_rxq);
> #endif /* _CPFL_RXTX_H_ */
> --
> 2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v2 00/10] add hairpin queue support
2023-04-21 6:50 [PATCH 00/10] add hairpin queue support beilei.xing
` (9 preceding siblings ...)
2023-04-21 6:50 ` [PATCH 10/10] net/cpfl: support hairpin bind/unbind beilei.xing
@ 2023-05-19 5:10 ` beilei.xing
2023-05-19 5:10 ` [PATCH v2 01/10] net/cpfl: refine structures beilei.xing
` (11 more replies)
10 siblings, 12 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-19 5:10 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patchset adds hairpin queue support.
v2 changes:
 - change hairpin rx queues configuration sequence.
 - code refinement.
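For reference, a minimal sketch of how an application is expected to drive
this feature through the generic ethdev hairpin API (rte_ethdev.h) follows;
the port id, queue indexes and descriptor count are example values only and
the single-port, automatic-bind case is assumed:

  struct rte_eth_hairpin_conf conf = { .peer_count = 1 };
  uint16_t port_id = 0;   /* example port */
  uint16_t hp_rxq = 1;    /* hairpin queue indexes follow the data queues */
  uint16_t hp_txq = 1;

  /* Rx hairpin queue peered with the Tx hairpin queue of the same port */
  conf.peers[0].port = port_id;
  conf.peers[0].queue = hp_txq;
  rte_eth_rx_hairpin_queue_setup(port_id, hp_rxq, 1024, &conf);

  /* Tx hairpin queue peered back with the Rx hairpin queue */
  conf.peers[0].queue = hp_rxq;
  rte_eth_tx_hairpin_queue_setup(port_id, hp_txq, 1024, &conf);

  /* the PMD configures and starts the hairpin queues in dev_start */
  rte_eth_dev_start(port_id);

For the cross-vport case (patch 10), the application typically sets
conf.manual_bind and conf.tx_explicit and calls rte_eth_hairpin_bind()/
rte_eth_hairpin_unbind() after both ports are started.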
Beilei Xing (10):
net/cpfl: refine structures
net/cpfl: support hairpin queue capability get
common/idpf: support queue groups add/delete
net/cpfl: add hairpin queue group during vport init
net/cpfl: support hairpin queue setup and release
net/cpfl: support hairpin queue configuration
net/cpfl: support hairpin queue start/stop
net/cpfl: enable write back based on ITR expire
net/cpfl: support peer ports get
net/cpfl: support hairpin bind/unbind
drivers/common/idpf/idpf_common_device.c | 75 ++
drivers/common/idpf/idpf_common_device.h | 4 +
drivers/common/idpf/idpf_common_virtchnl.c | 138 +++-
drivers/common/idpf/idpf_common_virtchnl.h | 18 +
drivers/common/idpf/version.map | 6 +
drivers/net/cpfl/cpfl_ethdev.c | 591 +++++++++++++--
drivers/net/cpfl/cpfl_ethdev.h | 36 +-
drivers/net/cpfl/cpfl_rxtx.c | 807 +++++++++++++++++++--
drivers/net/cpfl/cpfl_rxtx.h | 65 ++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 21 +-
10 files changed, 1642 insertions(+), 119 deletions(-)
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v2 01/10] net/cpfl: refine structures
2023-05-19 5:10 ` [PATCH v2 00/10] add hairpin queue support beilei.xing
@ 2023-05-19 5:10 ` beilei.xing
2023-05-19 5:10 ` [PATCH v2 02/10] net/cpfl: support hairpin queue capability get beilei.xing
` (10 subsequent siblings)
11 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-19 5:10 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch refines some structures to support hairpin queues:
cpfl_rx_queue/cpfl_tx_queue/cpfl_vport.
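The change is mechanical; condensed, the wrapper pattern introduced here
looks like the sketch below (illustrative only: cpfl_some_op is a made-up
helper, the real call sites are in the diff, and the cpfl-specific fields
are added by later patches):

  struct cpfl_vport {
          struct idpf_vport base; /* embedded common idpf vport */
  };

  struct cpfl_rx_queue {
          struct idpf_rx_queue base; /* same pattern for Tx queues */
  };

  static int
  cpfl_some_op(struct rte_eth_dev *dev)
  {
          struct cpfl_vport *cpfl_vport = dev->data->dev_private;
          struct idpf_vport *vport = &cpfl_vport->base;

          /* cpfl code works on the wrapper, common idpf code on the base */
          return vport->vport_id;
  }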
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 85 +++++++-----
drivers/net/cpfl/cpfl_ethdev.h | 6 +-
drivers/net/cpfl/cpfl_rxtx.c | 175 +++++++++++++++++-------
drivers/net/cpfl/cpfl_rxtx.h | 8 ++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 17 +--
5 files changed, 196 insertions(+), 95 deletions(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 306b8ad769..4a507f05d5 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -124,7 +124,8 @@ static int
cpfl_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct rte_eth_link new_link;
unsigned int i;
@@ -156,7 +157,8 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
static int
cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
dev_info->max_rx_queues = base->caps.max_rx_q;
@@ -216,7 +218,8 @@ cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
static int
cpfl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
/* mtu setting is forbidden if port is start */
if (dev->data->dev_started) {
@@ -256,12 +259,12 @@ static uint64_t
cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
{
uint64_t mbuf_alloc_failed = 0;
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i = 0;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- mbuf_alloc_failed += __atomic_load_n(&rxq->rx_stats.mbuf_alloc_failed,
+ cpfl_rxq = dev->data->rx_queues[i];
+ mbuf_alloc_failed += __atomic_load_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,
__ATOMIC_RELAXED);
}
@@ -271,8 +274,8 @@ cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
static int
cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
int ret;
@@ -305,20 +308,20 @@ cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
static void
cpfl_reset_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- __atomic_store_n(&rxq->rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
+ cpfl_rxq = dev->data->rx_queues[i];
+ __atomic_store_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
}
}
static int
cpfl_dev_stats_reset(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
int ret;
@@ -343,8 +346,8 @@ static int cpfl_dev_xstats_reset(struct rte_eth_dev *dev)
static int cpfl_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned int n)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
unsigned int i;
int ret;
@@ -459,7 +462,8 @@ cpfl_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t idx, shift;
int ret = 0;
@@ -498,7 +502,8 @@ cpfl_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t idx, shift;
int ret = 0;
@@ -536,7 +541,8 @@ static int
cpfl_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
int ret = 0;
@@ -601,7 +607,8 @@ static int
cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
int ret = 0;
@@ -638,7 +645,8 @@ cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,
static int
cpfl_dev_configure(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct rte_eth_conf *conf = &dev->data->dev_conf;
struct idpf_adapter *base = vport->adapter;
int ret;
@@ -710,7 +718,8 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
static int
cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
uint16_t nb_rx_queues = dev->data->nb_rx_queues;
return idpf_vport_irq_map_config(vport, nb_rx_queues);
@@ -719,14 +728,14 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
static int
cpfl_start_queues(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
- struct idpf_tx_queue *txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
int err = 0;
int i;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL || txq->tx_deferred_start)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
continue;
err = cpfl_tx_queue_start(dev, i);
if (err != 0) {
@@ -736,8 +745,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- if (rxq == NULL || rxq->rx_deferred_start)
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
continue;
err = cpfl_rx_queue_start(dev, i);
if (err != 0) {
@@ -752,7 +761,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)
static int
cpfl_dev_start(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(base);
uint16_t num_allocated_vectors = base->caps.num_allocated_vectors;
@@ -815,7 +825,8 @@ cpfl_dev_start(struct rte_eth_dev *dev)
static int
cpfl_dev_stop(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
if (vport->stopped == 1)
return 0;
@@ -836,7 +847,8 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
static int
cpfl_dev_close(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
@@ -846,7 +858,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
adapter->cur_vport_nb--;
dev->data->dev_private = NULL;
adapter->vports[vport->sw_idx] = NULL;
- rte_free(vport);
+ rte_free(cpfl_vport);
return 0;
}
@@ -1051,7 +1063,7 @@ cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id)
int i;
for (i = 0; i < adapter->cur_vport_nb; i++) {
- vport = adapter->vports[i];
+ vport = &adapter->vports[i]->base;
if (vport->vport_id != vport_id)
continue;
else
@@ -1328,7 +1340,8 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter)
static int
cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_vport_param *param = init_params;
struct cpfl_adapter_ext *adapter = param->adapter;
/* for sending create vport virtchnl msg prepare */
@@ -1354,7 +1367,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
goto err;
}
- adapter->vports[param->idx] = vport;
+ adapter->vports[param->idx] = cpfl_vport;
adapter->cur_vports |= RTE_BIT32(param->devarg_id);
adapter->cur_vport_nb++;
@@ -1470,7 +1483,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
snprintf(name, sizeof(name), "cpfl_%s_vport_0",
pci_dev->device.name);
retval = rte_eth_dev_create(&pci_dev->device, name,
- sizeof(struct idpf_vport),
+ sizeof(struct cpfl_vport),
NULL, NULL, cpfl_dev_vport_init,
&vport_param);
if (retval != 0)
@@ -1488,7 +1501,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
pci_dev->device.name,
devargs.req_vports[i]);
retval = rte_eth_dev_create(&pci_dev->device, name,
- sizeof(struct idpf_vport),
+ sizeof(struct cpfl_vport),
NULL, NULL, cpfl_dev_vport_init,
&vport_param);
if (retval != 0)
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 200dfcac02..81fe9ac4c3 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -69,13 +69,17 @@ struct cpfl_devargs {
uint16_t req_vport_nb;
};
+struct cpfl_vport {
+ struct idpf_vport base;
+};
+
struct cpfl_adapter_ext {
TAILQ_ENTRY(cpfl_adapter_ext) next;
struct idpf_adapter base;
char name[CPFL_ADAPTER_NAME_LEN];
- struct idpf_vport **vports;
+ struct cpfl_vport **vports;
uint16_t max_vport_nb;
uint16_t cur_vports; /* bit mask of created vport */
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index de59b31b3d..a441e2ffbe 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -128,7 +128,8 @@ cpfl_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,
uint16_t nb_desc, unsigned int socket_id,
struct rte_mempool *mp, uint8_t bufq_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct idpf_hw *hw = &base->hw;
const struct rte_memzone *mz;
@@ -219,15 +220,69 @@ cpfl_rx_split_bufq_release(struct idpf_rx_queue *bufq)
rte_free(bufq);
}
+static void
+cpfl_rx_queue_release(void *rxq)
+{
+ struct cpfl_rx_queue *cpfl_rxq = rxq;
+ struct idpf_rx_queue *q = NULL;
+
+ if (cpfl_rxq == NULL)
+ return;
+
+ q = &cpfl_rxq->base;
+
+ /* Split queue */
+ if (!q->adapter->is_rx_singleq) {
+ if (q->bufq2)
+ cpfl_rx_split_bufq_release(q->bufq2);
+
+ if (q->bufq1)
+ cpfl_rx_split_bufq_release(q->bufq1);
+
+ rte_free(cpfl_rxq);
+ return;
+ }
+
+ /* Single queue */
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(cpfl_rxq);
+}
+
+static void
+cpfl_tx_queue_release(void *txq)
+{
+ struct cpfl_tx_queue *cpfl_txq = txq;
+ struct idpf_tx_queue *q = NULL;
+
+ if (cpfl_txq == NULL)
+ return;
+
+ q = &cpfl_txq->base;
+
+ if (q->complq) {
+ rte_memzone_free(q->complq->mz);
+ rte_free(q->complq);
+ }
+
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(cpfl_txq);
+}
+
int
cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct idpf_hw *hw = &base->hw;
+ struct cpfl_rx_queue *cpfl_rxq;
const struct rte_memzone *mz;
struct idpf_rx_queue *rxq;
uint16_t rx_free_thresh;
@@ -247,21 +302,23 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Free memory if needed */
if (dev->data->rx_queues[queue_idx] != NULL) {
- idpf_qc_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
dev->data->rx_queues[queue_idx] = NULL;
}
/* Setup Rx queue */
- rxq = rte_zmalloc_socket("cpfl rxq",
- sizeof(struct idpf_rx_queue),
+ cpfl_rxq = rte_zmalloc_socket("cpfl rxq",
+ sizeof(struct cpfl_rx_queue),
RTE_CACHE_LINE_SIZE,
socket_id);
- if (rxq == NULL) {
+ if (cpfl_rxq == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
ret = -ENOMEM;
goto err_rxq_alloc;
}
+ rxq = &cpfl_rxq->base;
+
is_splitq = !!(vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
rxq->mp = mp;
@@ -328,7 +385,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
rxq->q_set = true;
- dev->data->rx_queues[queue_idx] = rxq;
+ dev->data->rx_queues[queue_idx] = cpfl_rxq;
return 0;
@@ -348,7 +405,8 @@ cpfl_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq,
uint16_t queue_idx, uint16_t nb_desc,
unsigned int socket_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
const struct rte_memzone *mz;
struct idpf_tx_queue *cq;
int ret;
@@ -396,9 +454,11 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t tx_rs_thresh, tx_free_thresh;
+ struct cpfl_tx_queue *cpfl_txq;
struct idpf_hw *hw = &base->hw;
const struct rte_memzone *mz;
struct idpf_tx_queue *txq;
@@ -418,21 +478,23 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Free memory if needed. */
if (dev->data->tx_queues[queue_idx] != NULL) {
- idpf_qc_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
dev->data->tx_queues[queue_idx] = NULL;
}
/* Allocate the TX queue data structure. */
- txq = rte_zmalloc_socket("cpfl txq",
- sizeof(struct idpf_tx_queue),
+ cpfl_txq = rte_zmalloc_socket("cpfl txq",
+ sizeof(struct cpfl_tx_queue),
RTE_CACHE_LINE_SIZE,
socket_id);
- if (txq == NULL) {
+ if (cpfl_txq == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
ret = -ENOMEM;
goto err_txq_alloc;
}
+ txq = &cpfl_txq->base;
+
is_splitq = !!(vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
txq->nb_tx_desc = nb_desc;
@@ -486,7 +548,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
queue_idx * vport->chunks_info.tx_qtail_spacing);
txq->ops = &def_txq_ops;
txq->q_set = true;
- dev->data->tx_queues[queue_idx] = txq;
+ dev->data->tx_queues[queue_idx] = cpfl_txq;
return 0;
@@ -502,6 +564,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
+ struct cpfl_rx_queue *cpfl_rxq;
struct idpf_rx_queue *rxq;
uint16_t max_pkt_len;
uint32_t frame_size;
@@ -510,7 +573,8 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
if (rx_queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
- rxq = dev->data->rx_queues[rx_queue_id];
+ cpfl_rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = &cpfl_rxq->base;
if (rxq == NULL || !rxq->q_set) {
PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
@@ -574,9 +638,10 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_rx_queue *rxq =
- dev->data->rx_queues[rx_queue_id];
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq = dev->data->rx_queues[rx_queue_id];
+ struct idpf_rx_queue *rxq = &cpfl_rxq->base;
int err = 0;
err = idpf_vc_rxq_config(vport, rxq);
@@ -609,15 +674,15 @@ cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
if (tx_queue_id >= dev->data->nb_tx_queues)
return -EINVAL;
- txq = dev->data->tx_queues[tx_queue_id];
+ cpfl_txq = dev->data->tx_queues[tx_queue_id];
/* Init the RX tail register. */
- IDPF_PCI_REG_WRITE(txq->qtx_tail, 0);
+ IDPF_PCI_REG_WRITE(cpfl_txq->base.qtx_tail, 0);
return 0;
}
@@ -625,12 +690,13 @@ cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
int
cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_tx_queue *txq =
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_tx_queue *cpfl_txq =
dev->data->tx_queues[tx_queue_id];
int err = 0;
- err = idpf_vc_txq_config(vport, txq);
+ err = idpf_vc_txq_config(vport, &cpfl_txq->base);
if (err != 0) {
PMD_DRV_LOG(ERR, "Fail to configure Tx queue %u", tx_queue_id);
return err;
@@ -649,7 +715,7 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
tx_queue_id);
} else {
- txq->q_started = true;
+ cpfl_txq->base.q_started = true;
dev->data->tx_queue_state[tx_queue_id] =
RTE_ETH_QUEUE_STATE_STARTED;
}
@@ -660,13 +726,16 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
int
cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq;
struct idpf_rx_queue *rxq;
int err;
if (rx_queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
+ cpfl_rxq = dev->data->rx_queues[rx_queue_id];
err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
@@ -674,7 +743,7 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return err;
}
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = &cpfl_rxq->base;
rxq->q_started = false;
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
rxq->ops->release_mbufs(rxq);
@@ -692,13 +761,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_tx_queue *cpfl_txq;
struct idpf_tx_queue *txq;
int err;
if (tx_queue_id >= dev->data->nb_tx_queues)
return -EINVAL;
+ cpfl_txq = dev->data->tx_queues[tx_queue_id];
+
err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
@@ -706,7 +779,7 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
return err;
}
- txq = dev->data->tx_queues[tx_queue_id];
+ txq = &cpfl_txq->base;
txq->q_started = false;
txq->ops->release_mbufs(txq);
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
@@ -723,25 +796,25 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
void
cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- idpf_qc_rx_queue_release(dev->data->rx_queues[qid]);
+ cpfl_rx_queue_release(dev->data->rx_queues[qid]);
}
void
cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- idpf_qc_tx_queue_release(dev->data->tx_queues[qid]);
+ cpfl_tx_queue_release(dev->data->tx_queues[qid]);
}
void
cpfl_stop_queues(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
- struct idpf_tx_queue *txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
int i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- if (rxq == NULL)
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq == NULL)
continue;
if (cpfl_rx_queue_stop(dev, i) != 0)
@@ -749,8 +822,8 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL)
continue;
if (cpfl_tx_queue_stop(dev, i) != 0)
@@ -761,9 +834,10 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
void
cpfl_set_rx_function(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
#ifdef RTE_ARCH_X86
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i;
if (cpfl_rx_vec_dev_check_default(dev) == CPFL_VECTOR_PATH &&
@@ -789,8 +863,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- (void)idpf_qc_splitq_rx_vec_setup(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ (void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
if (vport->rx_use_avx512) {
@@ -809,8 +883,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
} else {
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- (void)idpf_qc_singleq_rx_vec_setup(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ (void)idpf_qc_singleq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
if (vport->rx_use_avx512) {
@@ -859,10 +933,11 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
void
cpfl_set_tx_function(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
#ifdef RTE_ARCH_X86
#ifdef CC_AVX512_SUPPORT
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
int i;
#endif /* CC_AVX512_SUPPORT */
@@ -877,8 +952,8 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
vport->tx_use_avx512 = true;
if (vport->tx_use_avx512) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- idpf_qc_tx_vec_avx512_setup(txq);
+ cpfl_txq = dev->data->tx_queues[i];
+ idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
}
}
}
@@ -915,10 +990,10 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
#ifdef CC_AVX512_SUPPORT
if (vport->tx_use_avx512) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL)
continue;
- idpf_qc_tx_vec_avx512_setup(txq);
+ idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
}
PMD_DRV_LOG(NOTICE,
"Using Single AVX512 Vector Tx (port %d).",
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index fb267d38c8..bfb9ad97bd 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -23,6 +23,14 @@
#define CPFL_SUPPORT_CHAIN_NUM 5
+struct cpfl_rx_queue {
+ struct idpf_rx_queue base;
+};
+
+struct cpfl_tx_queue {
+ struct idpf_tx_queue base;
+};
+
int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
index 665418d27d..5690b17911 100644
--- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
+++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
@@ -76,15 +76,16 @@ cpfl_rx_splitq_vec_default(struct idpf_rx_queue *rxq)
static inline int
cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_rx_queue *rxq;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq;
int i, default_ret, splitq_ret, ret = CPFL_SCALAR_PATH;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- default_ret = cpfl_rx_vec_queue_default(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
- splitq_ret = cpfl_rx_splitq_vec_default(rxq);
+ splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base);
ret = splitq_ret && default_ret;
} else {
ret = default_ret;
@@ -100,12 +101,12 @@ static inline int
cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)
{
int i;
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
int ret = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- ret = cpfl_tx_vec_queue_default(txq);
+ cpfl_txq = dev->data->tx_queues[i];
+ ret = cpfl_tx_vec_queue_default(&cpfl_txq->base);
if (ret == CPFL_SCALAR_PATH)
return CPFL_SCALAR_PATH;
}
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v2 02/10] net/cpfl: support hairpin queue capability get
2023-05-19 5:10 ` [PATCH v2 00/10] add hairpin queue support beilei.xing
2023-05-19 5:10 ` [PATCH v2 01/10] net/cpfl: refine structures beilei.xing
@ 2023-05-19 5:10 ` beilei.xing
2023-05-19 5:10 ` [PATCH v2 03/10] common/idpf: support queue groups add/delete beilei.xing
` (9 subsequent siblings)
11 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-19 5:10 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch adds hairpin_cap_get ops support.
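On the application side the new op is reached through the generic
capability query; a minimal sketch (port_id is an example value):

  struct rte_eth_hairpin_cap cap;

  if (rte_eth_dev_hairpin_capability_get(port_id, &cap) == 0)
          printf("max hairpin queues %u, max desc %u\n",
                 cap.max_nb_queues, cap.max_nb_desc);

With this patch the cpfl PMD reports the CPFL_MAX_* values added below
(16 queues, 1024 descriptors, 1:1 Rx-to-Tx and Tx-to-Rx binding).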
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 13 +++++++++++++
drivers/net/cpfl/cpfl_rxtx.h | 4 ++++
2 files changed, 17 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 4a507f05d5..114fc18f5f 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -154,6 +154,18 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
return rte_eth_linkstatus_set(dev, &new_link);
}
+static int
+cpfl_hairpin_cap_get(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_hairpin_cap *cap)
+{
+ cap->max_nb_queues = CPFL_MAX_P2P_NB_QUEUES;
+ cap->max_rx_2_tx = CPFL_MAX_HAIRPINQ_RX_2_TX;
+ cap->max_tx_2_rx = CPFL_MAX_HAIRPINQ_TX_2_RX;
+ cap->max_nb_desc = CPFL_MAX_HAIRPINQ_NB_DESC;
+
+ return 0;
+}
+
static int
cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
@@ -889,6 +901,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.xstats_get = cpfl_dev_xstats_get,
.xstats_get_names = cpfl_dev_xstats_get_names,
.xstats_reset = cpfl_dev_xstats_reset,
+ .hairpin_cap_get = cpfl_hairpin_cap_get,
};
static int
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index bfb9ad97bd..b2b3537d10 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -13,6 +13,10 @@
#define CPFL_MIN_RING_DESC 32
#define CPFL_MAX_RING_DESC 4096
#define CPFL_DMA_MEM_ALIGN 4096
+#define CPFL_MAX_HAIRPINQ_RX_2_TX 1
+#define CPFL_MAX_HAIRPINQ_TX_2_RX 1
+#define CPFL_MAX_HAIRPINQ_NB_DESC 1024
+#define CPFL_MAX_P2P_NB_QUEUES 16
/* Base address of the HW descriptor ring should be 128B aligned. */
#define CPFL_RING_BASE_ALIGN 128
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v2 03/10] common/idpf: support queue groups add/delete
2023-05-19 5:10 ` [PATCH v2 00/10] add hairpin queue support beilei.xing
2023-05-19 5:10 ` [PATCH v2 01/10] net/cpfl: refine structures beilei.xing
2023-05-19 5:10 ` [PATCH v2 02/10] net/cpfl: support hairpin queue capability get beilei.xing
@ 2023-05-19 5:10 ` beilei.xing
2023-05-19 5:10 ` [PATCH v2 04/10] net/cpfl: add hairpin queue group during vport init beilei.xing
` (8 subsequent siblings)
11 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-19 5:10 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds queue group add/delete virtual channel support.
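The intended caller is the cpfl PMD (patch 04 in this series); condensed
from that patch, deleting the P2P queue group added at vport init looks
roughly like this (the CPFL_P2P_* constants are introduced there):

  struct virtchnl2_queue_group_id qg_ids[CPFL_P2P_NB_QUEUE_GRPS] = {0};
  int ret;

  qg_ids[0].queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
  qg_ids[0].queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
  ret = idpf_vc_queue_grps_del(vport, CPFL_P2P_NB_QUEUE_GRPS, qg_ids);
  if (ret != 0)
          PMD_DRV_LOG(ERR, "Failed to delete p2p queue groups");

idpf_vc_queue_grps_add() is used the same way at vport init, with a filled
virtchnl2_add_queue_groups request and the mailbox response copied back
into the caller's buffer.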
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 66 ++++++++++++++++++++++
drivers/common/idpf/idpf_common_virtchnl.h | 9 +++
drivers/common/idpf/version.map | 2 +
3 files changed, 77 insertions(+)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index a4e129062e..5ce8bb76ad 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -359,6 +359,72 @@ idpf_vc_vport_destroy(struct idpf_vport *vport)
return err;
}
+int
+idpf_vc_queue_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
+ uint8_t *p2p_queue_grps_out)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_cmd_info args;
+ int size, qg_info_size;
+ int err = -1;
+
+ size = sizeof(*p2p_queue_grps_info) +
+ (p2p_queue_grps_info->qg_info.num_queue_groups - 1) *
+ sizeof(struct virtchnl2_queue_group_info);
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_ADD_QUEUE_GROUPS;
+ args.in_args = (uint8_t *)p2p_queue_grps_info;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ if (err != 0) {
+ DRV_LOG(ERR,
+ "Failed to execute command of VIRTCHNL2_OP_ADD_QUEUE_GROUPS");
+ return err;
+ }
+
+ rte_memcpy(p2p_queue_grps_out, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
+ return 0;
+}
+
+int idpf_vc_queue_grps_del(struct idpf_vport *vport,
+ uint16_t num_q_grps,
+ struct virtchnl2_queue_group_id *qg_ids)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_delete_queue_groups *vc_del_q_grps;
+ struct idpf_cmd_info args;
+ int size;
+ int err;
+
+ size = sizeof(*vc_del_q_grps) +
+ (num_q_grps - 1) * sizeof(struct virtchnl2_queue_group_id);
+ vc_del_q_grps = rte_zmalloc("vc_del_q_grps", size, 0);
+
+ vc_del_q_grps->vport_id = vport->vport_id;
+ vc_del_q_grps->num_queue_groups = num_q_grps;
+ memcpy(vc_del_q_grps->qg_ids, qg_ids,
+ num_q_grps * sizeof(struct virtchnl2_queue_group_id));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_DEL_QUEUE_GROUPS;
+ args.in_args = (uint8_t *)vc_del_q_grps;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_DEL_QUEUE_GROUPS");
+
+ rte_free(vc_del_q_grps);
+ return err;
+}
+
int
idpf_vc_rss_key_set(struct idpf_vport *vport)
{
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index d479d93c8e..bf1d014c8d 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -64,4 +64,13 @@ int idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
__rte_internal
int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 *buff_count, struct idpf_dma_mem **buffs);
+__rte_internal
+int idpf_vc_queue_grps_del(struct idpf_vport *vport,
+ uint16_t num_q_grps,
+ struct virtchnl2_queue_group_id *qg_ids);
+__rte_internal
+int
+idpf_vc_queue_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *ptp_queue_grps_info,
+ uint8_t *ptp_queue_grps_out);
#endif /* _IDPF_COMMON_VIRTCHNL_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 7076759024..aa67f7ee27 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -48,6 +48,8 @@ INTERNAL {
idpf_vc_irq_map_unmap_config;
idpf_vc_one_msg_read;
idpf_vc_ptype_info_query;
+ idpf_vc_queue_grps_add;
+ idpf_vc_queue_grps_del;
idpf_vc_queue_switch;
idpf_vc_queues_ena_dis;
idpf_vc_rss_hash_get;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
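As a caller-side sketch of the new helpers (illustration only: the function and struct names come from this patch, the group id/type values are placeholders that the cpfl driver supplies in a later patch of this series, and the idpf common headers are assumed to be on the include path):

#include <string.h>

/* Delete a single queue group: build the group-id array on the stack and
 * let idpf_vc_queue_grps_del() marshal the VIRTCHNL2_OP_DEL_QUEUE_GROUPS
 * message.
 */
static int
example_del_one_queue_group(struct idpf_vport *vport,
                            uint16_t group_id, uint16_t group_type)
{
    struct virtchnl2_queue_group_id qg_id;

    memset(&qg_id, 0, sizeof(qg_id));
    qg_id.queue_group_id = group_id;
    qg_id.queue_group_type = group_type;

    return idpf_vc_queue_grps_del(vport, 1, &qg_id);
}

Note that both helpers size their messages with the usual flexible-array pattern, sizeof(struct) + (num - 1) * sizeof(element), so a single message can carry several groups.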
* [PATCH v2 04/10] net/cpfl: add hairpin queue group during vport init
2023-05-19 5:10 ` [PATCH v2 00/10] add hairpin queue support beilei.xing
` (2 preceding siblings ...)
2023-05-19 5:10 ` [PATCH v2 03/10] common/idpf: support queue groups add/delete beilei.xing
@ 2023-05-19 5:10 ` beilei.xing
` (7 subsequent siblings)
11 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-19 5:10 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds a hairpin queue group during vport init.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 130 +++++++++++++++++++++++++++++++++
drivers/net/cpfl/cpfl_ethdev.h | 18 +++++
drivers/net/cpfl/cpfl_rxtx.h | 4 +
3 files changed, 152 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 114fc18f5f..7ba425f533 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -856,6 +856,20 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
return 0;
}
+static int
+cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
+{
+ struct virtchnl2_queue_group_id qg_ids[CPFL_P2P_NB_QUEUE_GRPS] = {0};
+ int ret = 0;
+
+ qg_ids[0].queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
+ qg_ids[0].queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
+ ret = idpf_vc_queue_grps_del(vport, CPFL_P2P_NB_QUEUE_GRPS, qg_ids);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to delete p2p queue groups");
+ return ret;
+}
+
static int
cpfl_dev_close(struct rte_eth_dev *dev)
{
@@ -864,6 +878,10 @@ cpfl_dev_close(struct rte_eth_dev *dev)
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
+
+ if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
+ cpfl_p2p_queue_grps_del(vport);
+
idpf_vport_deinit(vport);
adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
@@ -1350,6 +1368,96 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter)
return vport_idx;
}
+static int
+cpfl_p2p_q_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
+ uint8_t *p2p_q_vc_out_info)
+{
+ int ret;
+
+ p2p_queue_grps_info->vport_id = vport->vport_id;
+ p2p_queue_grps_info->qg_info.num_queue_groups = CPFL_P2P_NB_QUEUE_GRPS;
+ p2p_queue_grps_info->qg_info.groups[0].num_rx_q = CPFL_MAX_P2P_NB_QUEUES;
+ p2p_queue_grps_info->qg_info.groups[0].num_rx_bufq = CPFL_P2P_NB_RX_BUFQ;
+ p2p_queue_grps_info->qg_info.groups[0].num_tx_q = CPFL_MAX_P2P_NB_QUEUES;
+ p2p_queue_grps_info->qg_info.groups[0].num_tx_complq = CPFL_P2P_NB_TX_COMPLQ;
+ p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
+ p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
+ p2p_queue_grps_info->qg_info.groups[0].rx_q_grp_info.rss_lut_size = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.tx_tc = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.priority = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.is_sp = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.pir_weight = 0;
+
+ ret = idpf_vc_queue_grps_add(vport, p2p_queue_grps_info, p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to add p2p queue groups.");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
+ struct virtchnl2_add_queue_groups *p2p_q_vc_out_info)
+{
+ struct p2p_queue_chunks_info *p2p_q_chunks_info = &cpfl_vport->p2p_q_chunks_info;
+ struct virtchnl2_queue_reg_chunks *vc_chunks_out;
+ int i, type;
+
+ if (p2p_q_vc_out_info->qg_info.groups[0].qg_id.queue_group_type !=
+ VIRTCHNL2_QUEUE_GROUP_P2P) {
+ PMD_DRV_LOG(ERR, "Add queue group response mismatch.");
+ return -EINVAL;
+ }
+
+ vc_chunks_out = &p2p_q_vc_out_info->qg_info.groups[0].chunks;
+
+ for (i = 0; i < vc_chunks_out->num_chunks; i++) {
+ type = vc_chunks_out->chunks[i].type;
+ switch (type) {
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ p2p_q_chunks_info->tx_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->tx_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->tx_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ p2p_q_chunks_info->rx_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->rx_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->rx_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ p2p_q_chunks_info->tx_compl_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->tx_compl_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->tx_compl_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ p2p_q_chunks_info->rx_buf_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->rx_buf_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->rx_buf_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported queue type");
+ break;
+ }
+ }
+
+ return 0;
+}
+
static int
cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
{
@@ -1359,6 +1467,8 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
struct cpfl_adapter_ext *adapter = param->adapter;
/* for sending create vport virtchnl msg prepare */
struct virtchnl2_create_vport create_vport_info;
+ struct virtchnl2_add_queue_groups p2p_queue_grps_info;
+ uint8_t p2p_q_vc_out_info[IDPF_DFLT_MBX_BUF_SIZE] = {0};
int ret = 0;
dev->dev_ops = &cpfl_eth_dev_ops;
@@ -1394,8 +1504,28 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
&dev->data->mac_addrs[0]);
+ if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
+ memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
+ ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to add p2p queue group.");
+ goto err_q_grps_add;
+ }
+ ret = cpfl_p2p_queue_info_init(cpfl_vport,
+ (struct virtchnl2_add_queue_groups *)p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to init p2p queue info.");
+ goto err_p2p_qinfo_init;
+ }
+ }
+
return 0;
+err_p2p_qinfo_init:
+ cpfl_p2p_queue_grps_del(vport);
+err_q_grps_add:
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
err_mac_addrs:
adapter->vports[param->idx] = NULL; /* reset */
idpf_vport_deinit(vport);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 81fe9ac4c3..65c9a195b2 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -56,6 +56,7 @@
/* Device IDs */
#define IDPF_DEV_ID_CPF 0x1453
+#define VIRTCHNL2_QUEUE_GROUP_P2P 0x100
struct cpfl_vport_param {
struct cpfl_adapter_ext *adapter;
@@ -69,8 +70,25 @@ struct cpfl_devargs {
uint16_t req_vport_nb;
};
+struct p2p_queue_chunks_info {
+ uint32_t tx_start_qid;
+ uint32_t rx_start_qid;
+ uint32_t tx_compl_start_qid;
+ uint32_t rx_buf_start_qid;
+
+ uint64_t tx_qtail_start;
+ uint32_t tx_qtail_spacing;
+ uint64_t rx_qtail_start;
+ uint32_t rx_qtail_spacing;
+ uint64_t tx_compl_qtail_start;
+ uint32_t tx_compl_qtail_spacing;
+ uint64_t rx_buf_qtail_start;
+ uint32_t rx_buf_qtail_spacing;
+};
+
struct cpfl_vport {
struct idpf_vport base;
+ struct p2p_queue_chunks_info p2p_q_chunks_info;
};
struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index b2b3537d10..3a87a1f4b3 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -17,6 +17,10 @@
#define CPFL_MAX_HAIRPINQ_TX_2_RX 1
#define CPFL_MAX_HAIRPINQ_NB_DESC 1024
#define CPFL_MAX_P2P_NB_QUEUES 16
+#define CPFL_P2P_NB_RX_BUFQ 1
+#define CPFL_P2P_NB_TX_COMPLQ 1
+#define CPFL_P2P_NB_QUEUE_GRPS 1
+#define CPFL_P2P_QUEUE_GRP_ID 1
/* Base address of the HW descriptor ring should be 128B aligned. */
#define CPFL_RING_BASE_ALIGN 128
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
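The chunk info cached above is what later patches use to translate a logical P2P queue index into an absolute queue id and a tail register offset. A small sketch of that arithmetic (equivalent to the cpfl_hw_qid_get()/cpfl_hw_qtail_get() helpers introduced in the next patch; the names here are illustrative only):

#include <stdint.h>

/* Absolute queue id: contiguous ids starting at the chunk's start_queue_id. */
static inline uint32_t
p2p_abs_qid(uint32_t start_qid, uint16_t logic_qid)
{
    return start_qid + logic_qid;
}

/* Tail register offset: start address plus logical index times register spacing. */
static inline uint64_t
p2p_qtail_off(uint64_t tail_start, uint16_t logic_qid, uint64_t tail_spacing)
{
    return tail_start + (uint64_t)logic_qid * tail_spacing;
}

For example, the second hairpin Tx queue (logic_qid == 1) lives at tx_start_qid + 1 and its doorbell is written at hw_addr + tx_qtail_start + 1 * tx_qtail_spacing.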
* [PATCH v2 05/10] net/cpfl: support hairpin queue setup and release
2023-05-19 5:10 ` [PATCH v2 00/10] add hairpin queue support beilei.xing
` (4 preceding siblings ...)
2023-05-19 5:10 ` [PATCH v2 04/10] net/cpfl: add hairpin queue group during vport init beilei.xing
@ 2023-05-19 5:10 ` beilei.xing
2023-05-19 5:10 ` [PATCH v2 06/10] net/cpfl: support hairpin queue configuration beilei.xing
` (5 subsequent siblings)
11 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-19 5:10 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
Support hairpin Rx/Tx queue setup and release.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 6 +
drivers/net/cpfl/cpfl_ethdev.h | 12 +
drivers/net/cpfl/cpfl_rxtx.c | 373 +++++++++++++++++++++++-
drivers/net/cpfl/cpfl_rxtx.h | 26 ++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 4 +
5 files changed, 420 insertions(+), 1 deletion(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 7ba425f533..6e1cfd78bb 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -878,6 +878,10 @@ cpfl_dev_close(struct rte_eth_dev *dev)
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
+ if (cpfl_vport->p2p_mp) {
+ rte_mempool_free(cpfl_vport->p2p_mp);
+ cpfl_vport->p2p_mp = NULL;
+ }
if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
cpfl_p2p_queue_grps_del(vport);
@@ -920,6 +924,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.xstats_get_names = cpfl_dev_xstats_get_names,
.xstats_reset = cpfl_dev_xstats_reset,
.hairpin_cap_get = cpfl_hairpin_cap_get,
+ .rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
+ .tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
};
static int
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 65c9a195b2..a48344299c 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -89,6 +89,18 @@ struct p2p_queue_chunks_info {
struct cpfl_vport {
struct idpf_vport base;
struct p2p_queue_chunks_info p2p_q_chunks_info;
+
+ struct rte_mempool *p2p_mp;
+
+ uint16_t nb_data_rxq;
+ uint16_t nb_data_txq;
+ uint16_t nb_p2p_rxq;
+ uint16_t nb_p2p_txq;
+
+ struct idpf_rx_queue *p2p_rx_bufq;
+ struct idpf_tx_queue *p2p_tx_complq;
+ bool p2p_manual_bind;
+
};
struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index a441e2ffbe..eaec16a9f7 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -10,6 +10,79 @@
#include "cpfl_rxtx.h"
#include "cpfl_rxtx_vec_common.h"
+uint16_t
+cpfl_hw_qid_get(uint16_t start_qid, uint16_t offset)
+{
+ return start_qid + offset;
+}
+
+uint64_t
+cpfl_hw_qtail_get(uint64_t tail_start, uint16_t offset, uint64_t tail_spacing)
+{
+ return tail_start + offset * tail_spacing;
+}
+
+static inline void
+cpfl_tx_hairpin_descq_reset(struct idpf_tx_queue *txq)
+{
+ uint32_t i, size;
+
+ if (!txq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+ return;
+ }
+
+ size = txq->nb_tx_desc * CPFL_P2P_DESC_LEN;
+ for (i = 0; i < size; i++)
+ ((volatile char *)txq->desc_ring)[i] = 0;
+}
+
+static inline void
+cpfl_tx_hairpin_complq_reset(struct idpf_tx_queue *cq)
+{
+ uint32_t i, size;
+
+ if (!cq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to complq is NULL");
+ return;
+ }
+
+ size = cq->nb_tx_desc * CPFL_P2P_DESC_LEN;
+ for (i = 0; i < size; i++)
+ ((volatile char *)cq->compl_ring)[i] = 0;
+}
+
+static inline void
+cpfl_rx_hairpin_descq_reset(struct idpf_rx_queue *rxq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxq)
+ return;
+
+ len = rxq->nb_rx_desc;
+ for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
+ ((volatile char *)rxq->rx_ring)[i] = 0;
+}
+
+static inline void
+cpfl_rx_hairpin_bufq_reset(struct idpf_rx_queue *rxbq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxbq)
+ return;
+
+ len = rxbq->nb_rx_desc;
+ for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
+ ((volatile char *)rxbq->rx_ring)[i] = 0;
+
+ rxbq->bufq1 = NULL;
+ rxbq->bufq2 = NULL;
+}
+
static uint64_t
cpfl_rx_offload_convert(uint64_t offload)
{
@@ -233,7 +306,10 @@ cpfl_rx_queue_release(void *rxq)
/* Split queue */
if (!q->adapter->is_rx_singleq) {
- if (q->bufq2)
+ /* The mz is shared between the Tx/Rx hairpin queues; let the Rx release
+ * path free the buf, q->bufq1->mz and q->mz.
+ */
+ if (!cpfl_rxq->hairpin_info.hairpin_q && q->bufq2)
cpfl_rx_split_bufq_release(q->bufq2);
if (q->bufq1)
@@ -384,6 +460,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
}
+ cpfl_vport->nb_data_rxq++;
rxq->q_set = true;
dev->data->rx_queues[queue_idx] = cpfl_rxq;
@@ -547,6 +624,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
queue_idx * vport->chunks_info.tx_qtail_spacing);
txq->ops = &def_txq_ops;
+ cpfl_vport->nb_data_txq++;
txq->q_set = true;
dev->data->tx_queues[queue_idx] = cpfl_txq;
@@ -561,6 +639,297 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return ret;
}
+static int
+cpfl_rx_hairpin_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,
+ uint16_t logic_qid, uint16_t nb_desc)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct rte_mempool *mp;
+ char pool_name[RTE_MEMPOOL_NAMESIZE];
+
+ mp = cpfl_vport->p2p_mp;
+ if (!mp) {
+ snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "p2p_mb_pool_%u",
+ dev->data->port_id);
+ mp = rte_pktmbuf_pool_create(pool_name, CPFL_P2P_NB_MBUF, CPFL_P2P_CACHE_SIZE,
+ 0, CPFL_P2P_MBUF_SIZE, dev->device->numa_node);
+ if (!mp) {
+ PMD_INIT_LOG(ERR, "Failed to allocate mbuf pool for p2p");
+ return -ENOMEM;
+ }
+ cpfl_vport->p2p_mp = mp;
+ }
+
+ bufq->mp = mp;
+ bufq->nb_rx_desc = nb_desc;
+ bufq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.rx_buf_start_qid, logic_qid);
+ bufq->port_id = dev->data->port_id;
+ bufq->adapter = adapter;
+ bufq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
+
+ bufq->sw_ring = rte_zmalloc("sw ring",
+ sizeof(struct rte_mbuf *) * nb_desc,
+ RTE_CACHE_LINE_SIZE);
+ if (!bufq->sw_ring) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
+ return -ENOMEM;
+ }
+
+ bufq->q_set = true;
+ bufq->ops = &def_rxq_ops;
+
+ return 0;
+}
+
+int
+cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf)
+{
+ struct cpfl_vport *cpfl_vport = (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter_base = vport->adapter;
+ uint16_t logic_qid = cpfl_vport->nb_p2p_rxq;
+ struct cpfl_rxq_hairpin_info *hairpin_info;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct idpf_rx_queue *bufq1 = NULL;
+ struct idpf_rx_queue *rxq;
+ uint16_t peer_port, peer_q;
+ uint16_t qid;
+ int ret;
+
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ PMD_INIT_LOG(ERR, "Only split queue model supports hairpin queue.");
+ return -EINVAL;
+ }
+
+ if (conf->peer_count != 1) {
+ PMD_INIT_LOG(ERR, "Can't support Rx hairpin queue peer count %d", conf->peer_count);
+ return -EINVAL;
+ }
+
+ peer_port = conf->peers[0].port;
+ peer_q = conf->peers[0].queue;
+
+ if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
+ nb_desc > CPFL_MAX_RING_DESC ||
+ nb_desc < CPFL_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is invalid", nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory if needed */
+ if (dev->data->rx_queues[queue_idx]) {
+ cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* Setup Rx description queue */
+ cpfl_rxq = rte_zmalloc_socket("cpfl hairpin rxq",
+ sizeof(struct cpfl_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!cpfl_rxq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
+ return -ENOMEM;
+ }
+
+ rxq = &cpfl_rxq->base;
+ hairpin_info = &cpfl_rxq->hairpin_info;
+ rxq->nb_rx_desc = nb_desc * 2;
+ rxq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.rx_start_qid, logic_qid);
+ rxq->port_id = dev->data->port_id;
+ rxq->adapter = adapter_base;
+ rxq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
+ hairpin_info->hairpin_q = true;
+ hairpin_info->peer_txp = peer_port;
+ hairpin_info->peer_txq_id = peer_q;
+
+ if (conf->manual_bind != 0)
+ cpfl_vport->p2p_manual_bind = true;
+ else
+ cpfl_vport->p2p_manual_bind = false;
+
+ /* setup 1 Rx buffer queue for the 1st hairpin rxq */
+ if (logic_qid == 0) {
+ bufq1 = rte_zmalloc_socket("hairpin rx bufq1",
+ sizeof(struct idpf_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!bufq1) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for hairpin Rx buffer queue 1.");
+ ret = -ENOMEM;
+ goto err_alloc_bufq1;
+ }
+ qid = 2 * logic_qid;
+ ret = cpfl_rx_hairpin_bufq_setup(dev, bufq1, qid, nb_desc);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to setup hairpin Rx buffer queue 1");
+ ret = -EINVAL;
+ goto err_setup_bufq1;
+ }
+ cpfl_vport->p2p_rx_bufq = bufq1;
+ }
+
+ rxq->bufq1 = cpfl_vport->p2p_rx_bufq;
+ rxq->bufq2 = NULL;
+
+ cpfl_vport->nb_p2p_rxq++;
+ rxq->q_set = true;
+ dev->data->rx_queues[queue_idx] = cpfl_rxq;
+
+ return 0;
+
+err_setup_bufq1:
+ rte_free(bufq1);
+err_alloc_bufq1:
+ rte_free(rxq);
+
+ return ret;
+}
+
+int
+cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter_base = vport->adapter;
+ uint16_t logic_qid = cpfl_vport->nb_p2p_txq;
+ struct cpfl_txq_hairpin_info *hairpin_info;
+ struct idpf_hw *hw = &adapter_base->hw;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct idpf_tx_queue *txq, *cq;
+ const struct rte_memzone *mz;
+ uint32_t ring_size;
+ uint16_t peer_port, peer_q;
+
+ if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ PMD_INIT_LOG(ERR, "Only split queue model supports hairpin queue.");
+ return -EINVAL;
+ }
+
+ if (conf->peer_count != 1) {
+ PMD_INIT_LOG(ERR, "Can't support Tx hairpin queue peer count %d", conf->peer_count);
+ return -EINVAL;
+ }
+
+ peer_port = conf->peers[0].port;
+ peer_q = conf->peers[0].queue;
+
+ if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
+ nb_desc > CPFL_MAX_RING_DESC ||
+ nb_desc < CPFL_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is invalid",
+ nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory if needed. */
+ if (dev->data->tx_queues[queue_idx]) {
+ cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the TX queue data structure. */
+ cpfl_txq = rte_zmalloc_socket("cpfl hairpin txq",
+ sizeof(struct cpfl_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!cpfl_txq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+ return -ENOMEM;
+ }
+
+ txq = &cpfl_txq->base;
+ hairpin_info = &cpfl_txq->hairpin_info;
+ /* Txq ring length should be 2 times the Tx completion queue size. */
+ txq->nb_tx_desc = nb_desc * 2;
+ txq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.tx_start_qid, logic_qid);
+ txq->port_id = dev->data->port_id;
+ hairpin_info->hairpin_q = true;
+ hairpin_info->peer_rxp = peer_port;
+ hairpin_info->peer_rxq_id = peer_q;
+
+ if (conf->manual_bind != 0)
+ cpfl_vport->p2p_manual_bind = true;
+ else
+ cpfl_vport->p2p_manual_bind = false;
+
+ /* The Tx hairpin queue always allocates its own Tx HW ring */
+ ring_size = RTE_ALIGN(txq->nb_tx_desc * CPFL_P2P_DESC_LEN,
+ CPFL_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_ring", logic_qid,
+ ring_size + CPFL_P2P_RING_BUF,
+ CPFL_RING_BASE_ALIGN,
+ dev->device->numa_node);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+ return -ENOMEM;
+ }
+
+ txq->tx_ring_phys_addr = mz->iova;
+ txq->desc_ring = mz->addr;
+ txq->mz = mz;
+
+ cpfl_tx_hairpin_descq_reset(txq);
+ txq->qtx_tail = hw->hw_addr +
+ cpfl_hw_qtail_get(cpfl_vport->p2p_q_chunks_info.tx_qtail_start,
+ logic_qid, cpfl_vport->p2p_q_chunks_info.tx_qtail_spacing);
+ txq->ops = &def_txq_ops;
+
+ if (cpfl_vport->p2p_tx_complq == NULL) {
+ cq = rte_zmalloc_socket("cpfl hairpin cq",
+ sizeof(struct idpf_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ dev->device->numa_node);
+ if (!cq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+ return -ENOMEM;
+ }
+
+ cq->nb_tx_desc = nb_desc;
+ cq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.tx_compl_start_qid, 0);
+ cq->port_id = dev->data->port_id;
+
+ /* Tx completion queue always allocates the HW ring */
+ ring_size = RTE_ALIGN(cq->nb_tx_desc * CPFL_P2P_DESC_LEN,
+ CPFL_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_compl_ring", logic_qid,
+ ring_size + CPFL_P2P_RING_BUF,
+ CPFL_RING_BASE_ALIGN,
+ dev->device->numa_node);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX completion queue");
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+ return -ENOMEM;
+ }
+ cq->tx_ring_phys_addr = mz->iova;
+ cq->compl_ring = mz->addr;
+ cq->mz = mz;
+
+ cpfl_tx_hairpin_complq_reset(cq);
+ cpfl_vport->p2p_tx_complq = cq;
+ }
+
+ txq->complq = cpfl_vport->p2p_tx_complq;
+
+ cpfl_vport->nb_p2p_txq++;
+ txq->q_set = true;
+ dev->data->tx_queues[queue_idx] = cpfl_txq;
+
+ return 0;
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
@@ -864,6 +1233,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ continue;
(void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 3a87a1f4b3..5e9f2dada7 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -13,6 +13,7 @@
#define CPFL_MIN_RING_DESC 32
#define CPFL_MAX_RING_DESC 4096
#define CPFL_DMA_MEM_ALIGN 4096
+#define CPFL_P2P_DESC_LEN 16
#define CPFL_MAX_HAIRPINQ_RX_2_TX 1
#define CPFL_MAX_HAIRPINQ_TX_2_RX 1
#define CPFL_MAX_HAIRPINQ_NB_DESC 1024
@@ -21,6 +22,10 @@
#define CPFL_P2P_NB_TX_COMPLQ 1
#define CPFL_P2P_NB_QUEUE_GRPS 1
#define CPFL_P2P_QUEUE_GRP_ID 1
+#define CPFL_P2P_NB_MBUF 4096
+#define CPFL_P2P_CACHE_SIZE 250
+#define CPFL_P2P_MBUF_SIZE 2048
+#define CPFL_P2P_RING_BUF 128
/* Base address of the HW descriptor ring should be 128B aligned. */
#define CPFL_RING_BASE_ALIGN 128
@@ -31,12 +36,26 @@
#define CPFL_SUPPORT_CHAIN_NUM 5
+struct cpfl_rxq_hairpin_info {
+ bool hairpin_q; /* if rx queue is a hairpin queue */
+ uint16_t peer_txp;
+ uint16_t peer_txq_id;
+};
+
struct cpfl_rx_queue {
struct idpf_rx_queue base;
+ struct cpfl_rxq_hairpin_info hairpin_info;
+};
+
+struct cpfl_txq_hairpin_info {
+ bool hairpin_q; /* if tx queue is a hairpin queue */
+ uint16_t peer_rxp;
+ uint16_t peer_rxq_id;
};
struct cpfl_tx_queue {
struct idpf_tx_queue base;
+ struct cpfl_txq_hairpin_info hairpin_info;
};
int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
@@ -57,4 +76,11 @@ void cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void cpfl_set_rx_function(struct rte_eth_dev *dev);
void cpfl_set_tx_function(struct rte_eth_dev *dev);
+uint16_t cpfl_hw_qid_get(uint16_t start_qid, uint16_t offset);
+uint64_t cpfl_hw_qtail_get(uint64_t tail_start, uint16_t offset, uint64_t tail_spacing);
+int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, const struct rte_eth_hairpin_conf *conf);
+int cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf);
#endif /* _CPFL_RXTX_H_ */
diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
index 5690b17911..d8e9191196 100644
--- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
+++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
@@ -85,6 +85,8 @@ cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)
cpfl_rxq = dev->data->rx_queues[i];
default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ continue;
splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base);
ret = splitq_ret && default_ret;
} else {
@@ -106,6 +108,8 @@ cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_tx_queues; i++) {
cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq->hairpin_info.hairpin_q)
+ continue;
ret = cpfl_tx_vec_queue_default(&cpfl_txq->base);
if (ret == CPFL_SCALAR_PATH)
return CPFL_SCALAR_PATH;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
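From the application side, these ops are exercised through the generic hairpin setup API. A sketch of a two-port pairing (illustration only: port ids, queue ids and the descriptor count are assumptions, and both ports must already be configured with the hairpin queues counted in their total queue numbers):

#include <string.h>
#include <rte_ethdev.h>

/* Create one hairpin pair: Rx hairpin queue rxq_id on rx_port fed by
 * Tx hairpin queue txq_id on tx_port. manual_bind is left at 0 so the
 * PMD binds the queues automatically at device start.
 */
static int
setup_hairpin_pair(uint16_t rx_port, uint16_t tx_port,
                   uint16_t rxq_id, uint16_t txq_id, uint16_t nb_desc)
{
    struct rte_eth_hairpin_conf conf;
    int ret;

    memset(&conf, 0, sizeof(conf));
    conf.peer_count = 1;
    conf.peers[0].port = tx_port;
    conf.peers[0].queue = txq_id;
    ret = rte_eth_rx_hairpin_queue_setup(rx_port, rxq_id, nb_desc, &conf);
    if (ret != 0)
        return ret;

    memset(&conf, 0, sizeof(conf));
    conf.peer_count = 1;
    conf.peers[0].port = rx_port;
    conf.peers[0].queue = rxq_id;
    return rte_eth_tx_hairpin_queue_setup(tx_port, txq_id, nb_desc, &conf);
}

As enforced above, the PMD only accepts this on the split queue model, with a peer_count of 1 and nb_desc aligned to CPFL_ALIGN_RING_DESC and within the CPFL_MIN_RING_DESC/CPFL_MAX_RING_DESC limits.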
* [PATCH v2 06/10] net/cpfl: support hairpin queue configuration
2023-05-19 5:10 ` [PATCH v2 00/10] add hairpin queue support beilei.xing
` (5 preceding siblings ...)
2023-05-19 5:10 ` [PATCH v2 05/10] net/cpfl: support hairpin queue setup and release beilei.xing
@ 2023-05-19 5:10 ` beilei.xing
2023-05-19 5:10 ` [PATCH v2 07/10] net/cpfl: support hairpin queue start/stop beilei.xing
` (4 subsequent siblings)
11 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-19 5:10 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports Rx/Tx hairpin queue configuration.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 70 +++++++++++
drivers/common/idpf/idpf_common_virtchnl.h | 6 +
drivers/common/idpf/version.map | 2 +
drivers/net/cpfl/cpfl_ethdev.c | 136 ++++++++++++++++++++-
drivers/net/cpfl/cpfl_rxtx.c | 80 ++++++++++++
drivers/net/cpfl/cpfl_rxtx.h | 7 ++
6 files changed, 295 insertions(+), 6 deletions(-)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index 5ce8bb76ad..c1769a11e0 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -1050,6 +1050,41 @@ idpf_vc_rxq_config(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
return err;
}
+int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct virtchnl2_rxq_info *rxq_info,
+ uint16_t num_qs)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+ struct idpf_cmd_info args;
+ int size, err, i;
+
+ size = sizeof(*vc_rxqs) + (num_qs - 1) *
+ sizeof(struct virtchnl2_rxq_info);
+ vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+ if (vc_rxqs == NULL) {
+ DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_rxqs->vport_id = vport->vport_id;
+ vc_rxqs->num_qinfo = num_qs;
+ memcpy(vc_rxqs->qinfo, rxq_info, num_qs * sizeof(struct virtchnl2_rxq_info));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+ args.in_args = (uint8_t *)vc_rxqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ rte_free(vc_rxqs);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+ return err;
+}
+
int
idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
{
@@ -1121,6 +1156,41 @@ idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
return err;
}
+int
+idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
+ uint16_t num_qs)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+ struct idpf_cmd_info args;
+ int size, err;
+
+ size = sizeof(*vc_txqs) + (num_qs - 1) * sizeof(struct virtchnl2_txq_info);
+ vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+ if (vc_txqs == NULL) {
+ DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_txqs->vport_id = vport->vport_id;
+ vc_txqs->num_qinfo = num_qs;
+ memcpy(vc_txqs->qinfo, txq_info, num_qs * sizeof(struct virtchnl2_txq_info));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+ args.in_args = (uint8_t *)vc_txqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ rte_free(vc_txqs);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+ return err;
+}
+
int
idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
struct idpf_ctlq_msg *q_msg)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index bf1d014c8d..277235ba7d 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -65,6 +65,12 @@ __rte_internal
int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 *buff_count, struct idpf_dma_mem **buffs);
__rte_internal
+int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct virtchnl2_rxq_info *rxq_info,
+ uint16_t num_qs);
+__rte_internal
+int idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
+ uint16_t num_qs);
+__rte_internal
int idpf_vc_queue_grps_del(struct idpf_vport *vport,
uint16_t num_q_grps,
struct virtchnl2_queue_group_id *qg_ids);
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index aa67f7ee27..a339a4bf8e 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -59,8 +59,10 @@ INTERNAL {
idpf_vc_rss_lut_get;
idpf_vc_rss_lut_set;
idpf_vc_rxq_config;
+ idpf_vc_rxq_config_by_info;
idpf_vc_stats_query;
idpf_vc_txq_config;
+ idpf_vc_txq_config_by_info;
idpf_vc_vectors_alloc;
idpf_vc_vectors_dealloc;
idpf_vc_vport_create;
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 6e1cfd78bb..61d218b589 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -737,33 +737,157 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
return idpf_vport_irq_map_config(vport, nb_rx_queues);
}
+/* Update hairpin_info for dev's tx hairpin queue */
+static int
+cpfl_txq_hairpin_info_update(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+ struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+ struct cpfl_txq_hairpin_info *hairpin_info;
+ struct cpfl_tx_queue *cpfl_txq;
+ int i;
+
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ hairpin_info = &cpfl_txq->hairpin_info;
+ if (hairpin_info->peer_rxp != rx_port) {
+ PMD_DRV_LOG(ERR, "port %d is not the peer port", rx_port);
+ return -EINVAL;
+ }
+ hairpin_info->peer_rxq_id =
+ cpfl_hw_qid_get(cpfl_rx_vport->p2p_q_chunks_info.rx_start_qid,
+ hairpin_info->peer_rxq_id - cpfl_rx_vport->nb_data_rxq);
+ }
+
+ return 0;
+}
+
+/* Bind Rx hairpin queue's memory zone to peer Tx hairpin queue's memory zone */
+static void
+cpfl_rxq_hairpin_mz_bind(struct rte_eth_dev *dev)
+{
+ struct cpfl_vport *cpfl_rx_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_rx_vport->base;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_hw *hw = &adapter->hw;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct rte_eth_dev *peer_dev;
+ const struct rte_memzone *mz;
+ uint16_t peer_tx_port;
+ uint16_t peer_tx_qid;
+ int i;
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ peer_tx_port = cpfl_rxq->hairpin_info.peer_txp;
+ peer_tx_qid = cpfl_rxq->hairpin_info.peer_txq_id;
+ peer_dev = &rte_eth_devices[peer_tx_port];
+ cpfl_txq = peer_dev->data->tx_queues[peer_tx_qid];
+
+ /* bind rx queue */
+ mz = cpfl_txq->base.mz;
+ cpfl_rxq->base.rx_ring_phys_addr = mz->iova;
+ cpfl_rxq->base.rx_ring = mz->addr;
+ cpfl_rxq->base.mz = mz;
+
+ /* bind rx buffer queue */
+ mz = cpfl_txq->base.complq->mz;
+ cpfl_rxq->base.bufq1->rx_ring_phys_addr = mz->iova;
+ cpfl_rxq->base.bufq1->rx_ring = mz->addr;
+ cpfl_rxq->base.bufq1->mz = mz;
+ cpfl_rxq->base.bufq1->qrx_tail = hw->hw_addr +
+ cpfl_hw_qtail_get(cpfl_rx_vport->p2p_q_chunks_info.rx_buf_qtail_start,
+ 0, cpfl_rx_vport->p2p_q_chunks_info.rx_buf_qtail_spacing);
+ }
+}
+
static int
cpfl_start_queues(struct rte_eth_dev *dev)
{
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_rx_queue *cpfl_rxq;
struct cpfl_tx_queue *cpfl_txq;
+ int update_flag = 0;
int err = 0;
int i;
+ /* For normal data queues, configure, init and enable Txq.
+ * For non-manual bind hairpin queues, configure Txq.
+ */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
cpfl_txq = dev->data->tx_queues[i];
if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
continue;
- err = cpfl_tx_queue_start(dev, i);
+ if (!cpfl_txq->hairpin_info.hairpin_q) {
+ err = cpfl_tx_queue_start(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+ return err;
+ }
+ } else if (!cpfl_vport->p2p_manual_bind) {
+ if (update_flag == 0) {
+ err = cpfl_txq_hairpin_info_update(dev,
+ cpfl_txq->hairpin_info.peer_rxp);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info");
+ return err;
+ }
+ update_flag = 1;
+ }
+ err = cpfl_hairpin_txq_config(vport, cpfl_txq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+ return err;
+ }
+ }
+ }
+
+ /* For non-manual bind hairpin queues, configure Tx completion queue first.*/
+ if (!cpfl_vport->p2p_manual_bind && cpfl_vport->p2p_tx_complq != NULL) {
+ err = cpfl_hairpin_tx_complq_config(cpfl_vport);
if (err != 0) {
- PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+ PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
return err;
}
}
+ /* For non-manual bind hairpin queues, configure Rx buffer queue.*/
+ if (!cpfl_vport->p2p_manual_bind && cpfl_vport->p2p_rx_bufq != NULL) {
+ cpfl_rxq_hairpin_mz_bind(dev);
+ err = cpfl_hairpin_rx_bufq_config(cpfl_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
+ return err;
+ }
+ }
+
+ /* For normal data queues, configure, init and enable Rxq.
+ * For non-manual bind hairpin queues, configure Rxq, and then init Rxq.
+ */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
continue;
- err = cpfl_rx_queue_start(dev, i);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
- return err;
+ if (!cpfl_rxq->hairpin_info.hairpin_q) {
+ err = cpfl_rx_queue_start(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
+ return err;
+ }
+ } else if (!cpfl_vport->p2p_manual_bind) {
+ err = cpfl_hairpin_rxq_config(vport, cpfl_rxq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
+ return err;
+ }
+ err = cpfl_rx_queue_init(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+ return err;
+ }
}
}
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index eaec16a9f7..9b757f6b63 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -930,6 +930,86 @@ cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return 0;
}
+int
+cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport)
+{
+ struct idpf_rx_queue *rx_bufq = cpfl_vport->p2p_rx_bufq;
+ struct virtchnl2_rxq_info rxq_info[1] = {0};
+
+ rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ rxq_info[0].queue_id = rx_bufq->queue_id;
+ rxq_info[0].ring_len = rx_bufq->nb_rx_desc;
+ rxq_info[0].dma_ring_addr = rx_bufq->rx_ring_phys_addr;
+ rxq_info[0].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info[0].rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
+ rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ rxq_info[0].data_buffer_size = rx_bufq->rx_buf_len;
+ rxq_info[0].buffer_notif_stride = CPFL_RX_BUF_STRIDE;
+
+ return idpf_vc_rxq_config_by_info(&cpfl_vport->base, rxq_info, 1);
+}
+
+int
+cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq)
+{
+ struct virtchnl2_rxq_info rxq_info[1] = {0};
+ struct idpf_rx_queue *rxq = &cpfl_rxq->base;
+
+ rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX;
+ rxq_info[0].queue_id = rxq->queue_id;
+ rxq_info[0].ring_len = rxq->nb_rx_desc;
+ rxq_info[0].dma_ring_addr = rxq->rx_ring_phys_addr;
+ rxq_info[0].rx_bufq1_id = rxq->bufq1->queue_id;
+ rxq_info[0].max_pkt_size = vport->max_pkt_len;
+ rxq_info[0].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info[0].qflags |= VIRTCHNL2_RX_DESC_SIZE_16BYTE;
+
+ rxq_info[0].data_buffer_size = rxq->rx_buf_len;
+ rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ rxq_info[0].rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
+
+ PMD_DRV_LOG(NOTICE, "hairpin: vport %u, Rxq id 0x%x",
+ vport->vport_id, rxq_info[0].queue_id);
+
+ return idpf_vc_rxq_config_by_info(vport, rxq_info, 1);
+}
+
+int
+cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport)
+{
+ struct idpf_tx_queue *tx_complq = cpfl_vport->p2p_tx_complq;
+ struct virtchnl2_txq_info txq_info[1] = {0};
+
+ txq_info[0].dma_ring_addr = tx_complq->tx_ring_phys_addr;
+ txq_info[0].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ txq_info[0].queue_id = tx_complq->queue_id;
+ txq_info[0].ring_len = tx_complq->nb_tx_desc;
+ txq_info[0].peer_rx_queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+ txq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info[0].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+
+ return idpf_vc_txq_config_by_info(&cpfl_vport->base, txq_info, 1);
+}
+
+int
+cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq)
+{
+ struct idpf_tx_queue *txq = &cpfl_txq->base;
+ struct virtchnl2_txq_info txq_info[1] = {0};
+
+ txq_info[0].dma_ring_addr = txq->tx_ring_phys_addr;
+ txq_info[0].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ txq_info[0].queue_id = txq->queue_id;
+ txq_info[0].ring_len = txq->nb_tx_desc;
+ txq_info[0].tx_compl_queue_id = txq->complq->queue_id;
+ txq_info[0].relative_queue_id = txq->queue_id;
+ txq_info[0].peer_rx_queue_id = cpfl_txq->hairpin_info.peer_rxq_id;
+ txq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info[0].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+
+ return idpf_vc_txq_config_by_info(vport, txq_info, 1);
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 5e9f2dada7..5e2ddd1ef9 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -30,12 +30,15 @@
#define CPFL_RING_BASE_ALIGN 128
#define CPFL_DEFAULT_RX_FREE_THRESH 32
+#define CPFL_RXBUF_LOW_WATERMARK 64
#define CPFL_DEFAULT_TX_RS_THRESH 32
#define CPFL_DEFAULT_TX_FREE_THRESH 32
#define CPFL_SUPPORT_CHAIN_NUM 5
+#define CPFL_RX_BUF_STRIDE 64
+
struct cpfl_rxq_hairpin_info {
bool hairpin_q; /* if rx queue is a hairpin queue */
uint16_t peer_txp;
@@ -83,4 +86,8 @@ int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
int cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc,
const struct rte_eth_hairpin_conf *conf);
+int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport);
+int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq);
+int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
+int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
#endif /* _CPFL_RXTX_H_ */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
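The point of the new *_config_by_info() variants is that the caller fills the virtchnl2 queue info itself and can batch several queues into one VIRTCHNL2_OP_CONFIG_RX_QUEUES/VIRTCHNL2_OP_CONFIG_TX_QUEUES message. A reduced sketch for the Rx side (illustration only: just a few of the fields set by cpfl_hairpin_rxq_config()/cpfl_hairpin_rx_bufq_config() are shown, and the idpf common headers are assumed to be included):

#include <string.h>

/* Configure two Rx queues with a single virtchnl message. */
static int
example_config_two_rxqs(struct idpf_vport *vport,
                        struct idpf_rx_queue *q0, struct idpf_rx_queue *q1)
{
    struct virtchnl2_rxq_info rxq_info[2];

    memset(rxq_info, 0, sizeof(rxq_info));

    rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX;
    rxq_info[0].queue_id = q0->queue_id;
    rxq_info[0].ring_len = q0->nb_rx_desc;
    rxq_info[0].dma_ring_addr = q0->rx_ring_phys_addr;
    rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;

    rxq_info[1].type = VIRTCHNL2_QUEUE_TYPE_RX;
    rxq_info[1].queue_id = q1->queue_id;
    rxq_info[1].ring_len = q1->nb_rx_desc;
    rxq_info[1].dma_ring_addr = q1->rx_ring_phys_addr;
    rxq_info[1].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;

    return idpf_vc_rxq_config_by_info(vport, rxq_info, 2);
}

The hairpin configuration above sends one queue per call, but the same entry point also lets the driver configure the Rx buffer queue and data queues without going through the per-queue idpf_vc_rxq_config() path.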
* [PATCH v2 07/10] net/cpfl: support hairpin queue start/stop
2023-05-19 5:10 ` [PATCH v2 00/10] add hairpin queue support beilei.xing
` (6 preceding siblings ...)
2023-05-19 5:10 ` [PATCH v2 06/10] net/cpfl: support hairpin queue configuration beilei.xing
@ 2023-05-19 5:10 ` beilei.xing
2023-05-19 5:10 ` [PATCH v2 08/10] net/cpfl: enable write back based on ITR expire beilei.xing
` (3 subsequent siblings)
11 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-19 5:10 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports Rx/Tx hairpin queue start/stop.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 2 +-
drivers/common/idpf/idpf_common_virtchnl.h | 3 +
drivers/common/idpf/version.map | 1 +
drivers/net/cpfl/cpfl_ethdev.c | 41 ++++++
drivers/net/cpfl/cpfl_rxtx.c | 153 ++++++++++++++++++---
drivers/net/cpfl/cpfl_rxtx.h | 14 ++
6 files changed, 195 insertions(+), 19 deletions(-)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index c1769a11e0..fa3d17738c 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -733,7 +733,7 @@ idpf_vc_vectors_dealloc(struct idpf_vport *vport)
return err;
}
-static int
+int
idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
uint32_t type, bool on)
{
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index 277235ba7d..18db6cd8c8 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -71,6 +71,9 @@ __rte_internal
int idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
uint16_t num_qs);
__rte_internal
+int idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
+ uint32_t type, bool on);
+__rte_internal
int idpf_vc_queue_grps_del(struct idpf_vport *vport,
uint16_t num_q_grps,
struct virtchnl2_queue_group_id *qg_ids);
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index a339a4bf8e..0e87dba2ae 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -45,6 +45,7 @@ INTERNAL {
idpf_vc_cmd_execute;
idpf_vc_ctlq_post_rx_buffs;
idpf_vc_ctlq_recv;
+ idpf_vc_ena_dis_one_queue;
idpf_vc_irq_map_unmap_config;
idpf_vc_one_msg_read;
idpf_vc_ptype_info_query;
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 61d218b589..fe34b38c45 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -891,6 +891,47 @@ cpfl_start_queues(struct rte_eth_dev *dev)
}
}
+ /* For non-manual bind hairpin queues, enable Tx queue and Rx queue,
+ * then enable Tx completion queue and Rx buffer queue.
+ */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) {
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ i - cpfl_vport->nb_data_txq,
+ false, true);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on",
+ i);
+ else
+ cpfl_txq->base.q_started = true;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) {
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ i - cpfl_vport->nb_data_rxq,
+ true, true);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on",
+ i);
+ else
+ cpfl_rxq->base.q_started = true;
+ }
+ }
+
+ if (!cpfl_vport->p2p_manual_bind &&
+ cpfl_vport->p2p_tx_complq != NULL &&
+ cpfl_vport->p2p_rx_bufq != NULL) {
+ err = cpfl_switch_hairpin_bufq_complq(cpfl_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq and Rx bufq");
+ return err;
+ }
+ }
+
return err;
}
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 9b757f6b63..ea98fc9407 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -1010,6 +1010,83 @@ cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq
return idpf_vc_txq_config_by_info(vport, txq_info, 1);
}
+int
+cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ queue_id = cpfl_vport->p2p_tx_complq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+ if (err)
+ return err;
+
+ type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+ return err;
+}
+
+int
+cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t logic_qid,
+ bool rx, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = rx ? VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;
+
+ if (type == VIRTCHNL2_QUEUE_TYPE_RX)
+ queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.rx_start_qid, logic_qid);
+ else
+ queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.tx_start_qid, logic_qid);
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+ if (err)
+ return err;
+
+ return err;
+}
+
+static int
+cpfl_alloc_split_p2p_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+ volatile struct virtchnl2_p2p_rx_buf_desc *rxd;
+ struct rte_mbuf *mbuf = NULL;
+ uint64_t dma_addr;
+ uint16_t i;
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ mbuf = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!mbuf)) {
+ PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+ return -ENOMEM;
+ }
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+ rxd = &((volatile struct virtchnl2_p2p_rx_buf_desc *)(rxq->rx_ring))[i];
+ rxd->reserve0 = 0;
+ rxd->pkt_addr = dma_addr;
+
+ rxq->sw_ring[i] = mbuf;
+ }
+
+ rxq->nb_rx_hold = 0;
+ /* The value written in the RX buffer queue tail register, must be a multiple of 8.*/
+ rxq->rx_tail = rxq->nb_rx_desc - CPFL_HAIRPIN_Q_TAIL_AUX_VALUE;
+
+ return 0;
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
@@ -1063,22 +1140,31 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
IDPF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
} else {
/* Split queue */
- err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
- return err;
- }
- err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
- return err;
+ if (cpfl_rxq->hairpin_info.hairpin_q) {
+ err = cpfl_alloc_split_p2p_rxq_mbufs(rxq->bufq1);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate p2p RX buffer queue mbuf");
+ return err;
+ }
+ } else {
+ err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+ return err;
+ }
+ err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+ return err;
+ }
}
rte_wmb();
/* Init the RX tail register. */
IDPF_PCI_REG_WRITE(rxq->bufq1->qrx_tail, rxq->bufq1->rx_tail);
- IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
+ if (rxq->bufq2)
+ IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
}
return err;
@@ -1185,7 +1271,12 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return -EINVAL;
cpfl_rxq = dev->data->rx_queues[rx_queue_id];
- err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ rx_queue_id - cpfl_vport->nb_data_txq,
+ true, false);
+ else
+ err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
rx_queue_id);
@@ -1199,10 +1290,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
idpf_qc_single_rx_queue_reset(rxq);
} else {
rxq->bufq1->ops->release_mbufs(rxq->bufq1);
- rxq->bufq2->ops->release_mbufs(rxq->bufq2);
- idpf_qc_split_rx_queue_reset(rxq);
+ if (rxq->bufq2)
+ rxq->bufq2->ops->release_mbufs(rxq->bufq2);
+ if (cpfl_rxq->hairpin_info.hairpin_q) {
+ cpfl_rx_hairpin_descq_reset(rxq);
+ cpfl_rx_hairpin_bufq_reset(rxq->bufq1);
+ } else {
+ idpf_qc_split_rx_queue_reset(rxq);
+ }
}
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ if (!cpfl_rxq->hairpin_info.hairpin_q)
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1221,7 +1319,12 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
cpfl_txq = dev->data->tx_queues[tx_queue_id];
- err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
+ if (cpfl_txq->hairpin_info.hairpin_q)
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ tx_queue_id - cpfl_vport->nb_data_txq,
+ false, false);
+ else
+ err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
tx_queue_id);
@@ -1234,10 +1337,17 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
idpf_qc_single_tx_queue_reset(txq);
} else {
- idpf_qc_split_tx_descq_reset(txq);
- idpf_qc_split_tx_complq_reset(txq->complq);
+ if (cpfl_txq->hairpin_info.hairpin_q) {
+ cpfl_tx_hairpin_descq_reset(txq);
+ cpfl_tx_hairpin_complq_reset(txq->complq);
+ } else {
+ idpf_qc_split_tx_descq_reset(txq);
+ idpf_qc_split_tx_complq_reset(txq->complq);
+ }
}
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ if (!cpfl_txq->hairpin_info.hairpin_q)
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1257,10 +1367,17 @@ cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
void
cpfl_stop_queues(struct rte_eth_dev *dev)
{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
struct cpfl_rx_queue *cpfl_rxq;
struct cpfl_tx_queue *cpfl_txq;
int i;
+ if (cpfl_vport->p2p_rx_bufq != NULL) {
+ if (cpfl_switch_hairpin_bufq_complq(cpfl_vport, false) != 0)
+ PMD_DRV_LOG(ERR, "Failed to stop hairpin Tx complq and Rx bufq");
+ }
+
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
if (cpfl_rxq == NULL)
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 5e2ddd1ef9..b961aad4ce 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -39,6 +39,17 @@
#define CPFL_RX_BUF_STRIDE 64
+/* The value written in the RX buffer queue tail register,
+ * and in WritePTR field in the TX completion queue context,
+ * must be a multiple of 8.
+ */
+#define CPFL_HAIRPIN_Q_TAIL_AUX_VALUE 8
+
+struct virtchnl2_p2p_rx_buf_desc {
+ __le64 reserve0;
+ __le64 pkt_addr; /* Packet buffer address */
+};
+
struct cpfl_rxq_hairpin_info {
bool hairpin_q; /* if rx queue is a hairpin queue */
uint16_t peer_txp;
@@ -90,4 +101,7 @@ int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq);
int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
+int cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t qid,
+ bool rx, bool on);
#endif /* _CPFL_RXTX_H_ */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v2 08/10] net/cpfl: enable write back based on ITR expire
2023-05-19 5:10 ` [PATCH v2 00/10] add hairpin queue support beilei.xing
` (7 preceding siblings ...)
2023-05-19 5:10 ` [PATCH v2 07/10] net/cpfl: support hairpin queue start/stop beilei.xing
@ 2023-05-19 5:10 ` beilei.xing
2023-05-19 5:10 ` [PATCH v2 09/10] net/cpfl: support peer ports get beilei.xing
` (2 subsequent siblings)
11 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-19 5:10 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch enables write back based on ITR expire
(WR_ON_ITR) for hairpin queues.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_device.c | 75 ++++++++++++++++++++++++
drivers/common/idpf/idpf_common_device.h | 4 ++
drivers/common/idpf/version.map | 1 +
drivers/net/cpfl/cpfl_ethdev.c | 13 +++-
4 files changed, 92 insertions(+), 1 deletion(-)
diff --git a/drivers/common/idpf/idpf_common_device.c b/drivers/common/idpf/idpf_common_device.c
index 3b58bdd41e..86a4a54f9b 100644
--- a/drivers/common/idpf/idpf_common_device.c
+++ b/drivers/common/idpf/idpf_common_device.c
@@ -559,6 +559,81 @@ idpf_vport_irq_map_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
return ret;
}
+int
+idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport, uint32_t *qids, uint16_t nb_rx_queues)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_queue_vector *qv_map;
+ struct idpf_hw *hw = &adapter->hw;
+ uint32_t dynctl_val, itrn_val;
+ uint32_t dynctl_reg_start;
+ uint32_t itrn_reg_start;
+ uint16_t i;
+ int ret;
+
+ qv_map = rte_zmalloc("qv_map",
+ nb_rx_queues *
+ sizeof(struct virtchnl2_queue_vector), 0);
+ if (qv_map == NULL) {
+ DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
+ nb_rx_queues);
+ ret = -ENOMEM;
+ goto qv_map_alloc_err;
+ }
+
+ /* Rx interrupt disabled, Map interrupt only for writeback */
+
+ /* The capability flags adapter->caps.other_caps should be
+ * compared with bit VIRTCHNL2_CAP_WB_ON_ITR here. The if
+ * condition should be updated when the FW can return the
+ * correct flag bits.
+ */
+ dynctl_reg_start =
+ vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
+ itrn_reg_start =
+ vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
+ dynctl_val = IDPF_READ_REG(hw, dynctl_reg_start);
+ DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x", dynctl_val);
+ itrn_val = IDPF_READ_REG(hw, itrn_reg_start);
+ DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
+ /* Force write-backs by setting WB_ON_ITR bit in DYN_CTL
+ * register. WB_ON_ITR and INTENA are mutually exclusive
+ * bits. Setting WB_ON_ITR bits means TX and RX Descs
+ * are written back based on ITR expiration irrespective
+ * of INTENA setting.
+ */
+ /* TBD: need to tune INTERVAL value for better performance. */
+ itrn_val = (itrn_val == 0) ? IDPF_DFLT_INTERVAL : itrn_val;
+ dynctl_val = VIRTCHNL2_ITR_IDX_0 <<
+ PF_GLINT_DYN_CTL_ITR_INDX_S |
+ PF_GLINT_DYN_CTL_WB_ON_ITR_M |
+ itrn_val << PF_GLINT_DYN_CTL_INTERVAL_S;
+ IDPF_WRITE_REG(hw, dynctl_reg_start, dynctl_val);
+
+ for (i = 0; i < nb_rx_queues; i++) {
+ /* map all queues to the same vector */
+ qv_map[i].queue_id = qids[i];
+ qv_map[i].vector_id =
+ vport->recv_vectors->vchunks.vchunks->start_vector_id;
+ }
+ vport->qv_map = qv_map;
+
+ ret = idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, true);
+ if (ret != 0) {
+ DRV_LOG(ERR, "config interrupt mapping failed");
+ goto config_irq_map_err;
+ }
+
+ return 0;
+
+config_irq_map_err:
+ rte_free(vport->qv_map);
+ vport->qv_map = NULL;
+
+qv_map_alloc_err:
+ return ret;
+}
+
int
idpf_vport_irq_unmap_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
{
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 7cf2355bc9..1aa9d9516f 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -212,5 +212,9 @@ int idpf_vport_info_init(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_info);
__rte_internal
void idpf_vport_stats_update(struct virtchnl2_vport_stats *oes, struct virtchnl2_vport_stats *nes);
+__rte_internal
+int idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport,
+ uint32_t *qids,
+ uint16_t nb_rx_queues);
#endif /* _IDPF_COMMON_DEVICE_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 0e87dba2ae..e3a7ef0daa 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -74,6 +74,7 @@ INTERNAL {
idpf_vport_info_init;
idpf_vport_init;
idpf_vport_irq_map_config;
+ idpf_vport_irq_map_config_by_qids;
idpf_vport_irq_unmap_config;
idpf_vport_rss_config;
idpf_vport_stats_update;
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index fe34b38c45..2936d23be7 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -730,11 +730,22 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
static int
cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
{
+ uint32_t qids[CPFL_MAX_P2P_NB_QUEUES + IDPF_DEFAULT_RXQ_NUM] = {0};
struct cpfl_vport *cpfl_vport = dev->data->dev_private;
struct idpf_vport *vport = &cpfl_vport->base;
uint16_t nb_rx_queues = dev->data->nb_rx_queues;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i;
- return idpf_vport_irq_map_config(vport, nb_rx_queues);
+ for (i = 0; i < nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ qids[i] = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.rx_start_qid,
+ (i - cpfl_vport->nb_data_rxq));
+ else
+ qids[i] = cpfl_hw_qid_get(vport->chunks_info.rx_start_qid, i);
+ }
+ return idpf_vport_irq_map_config_by_qids(vport, qids, nb_rx_queues);
}
/* Update hairpin_info for dev's tx hairpin queue */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v2 09/10] net/cpfl: support peer ports get
2023-05-19 5:10 ` [PATCH v2 00/10] add hairpin queue support beilei.xing
` (8 preceding siblings ...)
2023-05-19 5:10 ` [PATCH v2 08/10] net/cpfl: enable write back based on ITR expire beilei.xing
@ 2023-05-19 5:10 ` beilei.xing
2023-05-19 5:10 ` [PATCH v2 10/10] net/cpfl: support hairpin bind/unbind beilei.xing
2023-05-19 7:31 ` [PATCH v3 00/10] net/cpfl: add hairpin queue support beilei.xing
11 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-19 5:10 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch adds support for getting hairpin peer ports.
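A minimal application-level sketch, not part of this patch (the helper name and port id are placeholders): the new op is reached through the generic ethdev call rte_eth_hairpin_get_peer_ports(), here used from the Tx side (direction = 1) to list the hairpin Rx peers.

    #include <stdio.h>
    #include <rte_common.h>
    #include <rte_ethdev.h>

    /* Query the hairpin Rx peers of tx_port_id; returns the number of
     * peer ports found, or a negative errno on failure.
     */
    static int
    show_hairpin_rx_peers(uint16_t tx_port_id)
    {
            uint16_t peers[RTE_MAX_ETHPORTS];
            int n, i;

            n = rte_eth_hairpin_get_peer_ports(tx_port_id, peers,
                                               RTE_DIM(peers), 1);
            if (n < 0)
                    return n;
            for (i = 0; i < n; i++)
                    printf("hairpin Tx port %u peers Rx port %u\n",
                           tx_port_id, peers[i]);
            return n;
    }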
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 34 ++++++++++++++++++++++++++++++++++
1 file changed, 34 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 2936d23be7..5f42728df1 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1073,6 +1073,39 @@ cpfl_dev_close(struct rte_eth_dev *dev)
return 0;
}
+static int
+cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
+ __rte_unused size_t len, uint32_t tx)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_tx_queue *txq;
+ struct idpf_rx_queue *rxq;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i, j;
+
+ if (tx > 0) {
+ for (i = cpfl_vport->nb_data_txq, j = 0; i < dev->data->nb_tx_queues; i++, j++) {
+ txq = dev->data->tx_queues[i];
+ if (txq == NULL)
+ return -EINVAL;
+ cpfl_txq = (struct cpfl_tx_queue *)txq;
+ peer_ports[j] = cpfl_txq->hairpin_info.peer_rxp;
+ }
+ } else if (tx == 0) {
+ for (i = cpfl_vport->nb_data_rxq, j = 0; i < dev->data->nb_rx_queues; i++, j++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq == NULL)
+ return -EINVAL;
+ cpfl_rxq = (struct cpfl_rx_queue *)rxq;
+ peer_ports[j] = cpfl_rxq->hairpin_info.peer_txp;
+ }
+ }
+
+ return j;
+}
+
static const struct eth_dev_ops cpfl_eth_dev_ops = {
.dev_configure = cpfl_dev_configure,
.dev_close = cpfl_dev_close,
@@ -1102,6 +1135,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.hairpin_cap_get = cpfl_hairpin_cap_get,
.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
+ .hairpin_get_peer_ports = cpfl_hairpin_get_peer_ports,
};
static int
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v2 10/10] net/cpfl: support hairpin bind/unbind
2023-05-19 5:10 ` [PATCH v2 00/10] add hairpin queue support beilei.xing
` (9 preceding siblings ...)
2023-05-19 5:10 ` [PATCH v2 09/10] net/cpfl: support peer ports get beilei.xing
@ 2023-05-19 5:10 ` beilei.xing
2023-05-19 7:31 ` [PATCH v3 00/10] net/cpfl: add hairpin queue support beilei.xing
11 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-19 5:10 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports hairpin_bind/unbind ops.
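A rough usage sketch, not part of this patch and assuming both ports were already set up with hairpin queues in manual-bind mode (tx_port and rx_port are placeholder port ids): the ops are invoked through the generic ethdev API.

    #include <rte_ethdev.h>

    /* Manually bind the Tx-side hairpin queues of tx_port to rx_port,
     * then unbind them again, e.g. before stopping the ports.
     */
    static int
    hairpin_manual_bind(uint16_t tx_port, uint16_t rx_port)
    {
            int ret;

            ret = rte_eth_hairpin_bind(tx_port, rx_port);
            if (ret != 0)
                    return ret;
            /* ... traffic runs through the hairpin path here ... */
            return rte_eth_hairpin_unbind(tx_port, rx_port);
    }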
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 137 +++++++++++++++++++++++++++++++++
drivers/net/cpfl/cpfl_rxtx.c | 28 +++++++
drivers/net/cpfl/cpfl_rxtx.h | 2 +
3 files changed, 167 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 5f42728df1..389860d495 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1106,6 +1106,141 @@ cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
return j;
}
+static int
+cpfl_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct idpf_vport *tx_vport = &cpfl_tx_vport->base;
+ struct cpfl_vport *cpfl_rx_vport;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct rte_eth_dev *peer_dev;
+ struct idpf_vport *rx_vport;
+ int err = 0;
+ int i;
+
+ err = cpfl_txq_hairpin_info_update(dev, rx_port);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info.");
+ return err;
+ }
+
+ /* configure hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ err = cpfl_hairpin_txq_config(tx_vport, cpfl_txq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+ return err;
+ }
+ }
+
+ err = cpfl_hairpin_tx_complq_config(cpfl_tx_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
+ return err;
+ }
+
+ peer_dev = &rte_eth_devices[rx_port];
+ cpfl_rx_vport = (struct cpfl_vport *)peer_dev->data->dev_private;
+ rx_vport = &cpfl_rx_vport->base;
+ cpfl_rxq_hairpin_mz_bind(peer_dev);
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ err = cpfl_hairpin_rxq_config(rx_vport, cpfl_rxq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
+ return err;
+ }
+ err = cpfl_rx_queue_init(peer_dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+ return err;
+ }
+ }
+
+ err = cpfl_hairpin_rx_bufq_config(cpfl_rx_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
+ return err;
+ }
+
+ /* enable hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+ i - cpfl_tx_vport->nb_data_txq,
+ false, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on",
+ i);
+ return err;
+ }
+ cpfl_txq->base.q_started = true;
+ }
+
+ err = cpfl_switch_hairpin_complq(cpfl_tx_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq");
+ return err;
+ }
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+ i - cpfl_rx_vport->nb_data_rxq,
+ true, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on",
+ i);
+ }
+ cpfl_rxq->base.q_started = true;
+ }
+
+ err = cpfl_switch_hairpin_bufq(cpfl_rx_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Rx buffer queue");
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+cpfl_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+ struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i;
+
+ /* disable hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+ i - cpfl_tx_vport->nb_data_txq,
+ false, false);
+ cpfl_txq->base.q_started = false;
+ }
+
+ cpfl_switch_hairpin_complq(cpfl_tx_vport, false);
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+ i - cpfl_rx_vport->nb_data_rxq,
+ true, false);
+ cpfl_rxq->base.q_started = false;
+ }
+
+ cpfl_switch_hairpin_bufq(cpfl_rx_vport, false);
+
+ return 0;
+}
+
static const struct eth_dev_ops cpfl_eth_dev_ops = {
.dev_configure = cpfl_dev_configure,
.dev_close = cpfl_dev_close,
@@ -1136,6 +1271,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
.hairpin_get_peer_ports = cpfl_hairpin_get_peer_ports,
+ .hairpin_bind = cpfl_hairpin_bind,
+ .hairpin_unbind = cpfl_hairpin_unbind,
};
static int
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index ea98fc9407..02ad75d986 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -1030,6 +1030,34 @@ cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on)
return err;
}
+int
+cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ queue_id = cpfl_vport->p2p_tx_complq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+ return err;
+}
+
+int
+cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+ return err;
+}
+
int
cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t logic_qid,
bool rx, bool on)
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index b961aad4ce..279c271520 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -102,6 +102,8 @@ int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl
int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
int cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on);
int cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t qid,
bool rx, bool on);
#endif /* _CPFL_RXTX_H_ */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH 03/10] common/idpf: support queue groups add/delete
2023-04-24 8:49 ` Liu, Mingxia
@ 2023-05-19 5:36 ` Xing, Beilei
0 siblings, 0 replies; 164+ messages in thread
From: Xing, Beilei @ 2023-05-19 5:36 UTC (permalink / raw)
To: Liu, Mingxia, Wu, Jingjing; +Cc: dev
> -----Original Message-----
> From: Liu, Mingxia <mingxia.liu@intel.com>
> Sent: Monday, April 24, 2023 4:50 PM
> To: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [PATCH 03/10] common/idpf: support queue groups add/delete
>
>
>
> > -----Original Message-----
> > From: Xing, Beilei <beilei.xing@intel.com>
> > Sent: Friday, April 21, 2023 2:51 PM
> > To: Wu, Jingjing <jingjing.wu@intel.com>
> > Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> > <beilei.xing@intel.com>
> > Subject: [PATCH 03/10] common/idpf: support queue groups add/delete
> >
> > From: Beilei Xing <beilei.xing@intel.com>
> >
> > This patch adds queue group add/delete virtual channel support.
> >
> > Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > ---
> > drivers/common/idpf/idpf_common_virtchnl.c | 66
> > ++++++++++++++++++++++
> > drivers/common/idpf/idpf_common_virtchnl.h | 9 +++
> > drivers/common/idpf/version.map | 2 +
> > 3 files changed, 77 insertions(+)
> >
> > diff --git a/drivers/common/idpf/idpf_common_virtchnl.c
> > b/drivers/common/idpf/idpf_common_virtchnl.c
> > index a4e129062e..76a658bb26 100644
> > --- a/drivers/common/idpf/idpf_common_virtchnl.c
> > +++ b/drivers/common/idpf/idpf_common_virtchnl.c
> > @@ -359,6 +359,72 @@ idpf_vc_vport_destroy(struct idpf_vport *vport)
> > return err;
> > }
> >
> > +int
> > +idpf_vc_queue_grps_add(struct idpf_vport *vport,
> > + struct virtchnl2_add_queue_groups
> > *ptp_queue_grps_info,
> > + uint8_t *ptp_queue_grps_out)
> [Liu, Mingxia] Better to unify the abbreviation of "port to port": this patch
> uses ptp, while the next patch uses p2p.
Yes, it's refined in the v2 patch.
> > +{
> > + struct idpf_adapter *adapter = vport->adapter;
> > + struct idpf_cmd_info args;
> > + int size, qg_info_size;
> > + int err = -1;
> > +
> > + size = sizeof(*ptp_queue_grps_info) +
> > + (ptp_queue_grps_info->qg_info.num_queue_groups - 1) *
> > + sizeof(struct virtchnl2_queue_group_info);
> > +
> > + memset(&args, 0, sizeof(args));
> > + args.ops = VIRTCHNL2_OP_ADD_QUEUE_GROUPS;
> > + args.in_args = (uint8_t *)ptp_queue_grps_info;
> > + args.in_args_size = size;
> > + args.out_buffer = adapter->mbx_resp;
> > + args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> > +
> > + err = idpf_vc_cmd_execute(adapter, &args);
> > + if (err != 0) {
> > + DRV_LOG(ERR,
> > + "Failed to execute command of
> > VIRTCHNL2_OP_ADD_QUEUE_GROUPS");
> > + return err;
> > + }
> > +
> > + rte_memcpy(ptp_queue_grps_out, args.out_buffer,
> > IDPF_DFLT_MBX_BUF_SIZE);
> > + return 0;
> > +}
> > +
> > +int idpf_vc_queue_grps_del(struct idpf_vport *vport,
> > + uint16_t num_q_grps,
> > + struct virtchnl2_queue_group_id *qg_ids) {
> > + struct idpf_adapter *adapter = vport->adapter;
> > + struct virtchnl2_delete_queue_groups *vc_del_q_grps;
> > + struct idpf_cmd_info args;
> > + int size;
> > + int err;
> > +
> > + size = sizeof(*vc_del_q_grps) +
> > + (num_q_grps - 1) * sizeof(struct virtchnl2_queue_group_id);
> > + vc_del_q_grps = rte_zmalloc("vc_del_q_grps", size, 0);
> > +
> > + vc_del_q_grps->vport_id = vport->vport_id;
> > + vc_del_q_grps->num_queue_groups = num_q_grps;
> > + memcpy(vc_del_q_grps->qg_ids, qg_ids,
> > + num_q_grps * sizeof(struct virtchnl2_queue_group_id));
> > +
> > + memset(&args, 0, sizeof(args));
> > + args.ops = VIRTCHNL2_OP_DEL_QUEUE_GROUPS;
> > + args.in_args = (uint8_t *)vc_del_q_grps;
> > + args.in_args_size = size;
> > + args.out_buffer = adapter->mbx_resp;
> > + args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> > +
> > + err = idpf_vc_cmd_execute(adapter, &args);
> > + if (err != 0)
> > + DRV_LOG(ERR, "Failed to execute command of
> > +VIRTCHNL2_OP_DEL_QUEUE_GROUPS");
> > +
> > + rte_free(vc_del_q_grps);
> > + return err;
> > +}
> > +
> > int
> > idpf_vc_rss_key_set(struct idpf_vport *vport) { diff --git
> > a/drivers/common/idpf/idpf_common_virtchnl.h
> > b/drivers/common/idpf/idpf_common_virtchnl.h
> > index d479d93c8e..bf1d014c8d 100644
> > --- a/drivers/common/idpf/idpf_common_virtchnl.h
> > +++ b/drivers/common/idpf/idpf_common_virtchnl.h
> > @@ -64,4 +64,13 @@ int idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq,
> > u16 *num_q_msg, __rte_internal int idpf_vc_ctlq_post_rx_buffs(struct
> > idpf_hw *hw, struct idpf_ctlq_info *cq,
> > u16 *buff_count, struct idpf_dma_mem **buffs);
> > +__rte_internal
> > +int idpf_vc_queue_grps_del(struct idpf_vport *vport,
> > + uint16_t num_q_grps,
> > + struct virtchnl2_queue_group_id *qg_ids);
> > __rte_internal int
> > +idpf_vc_queue_grps_add(struct idpf_vport *vport,
> > + struct virtchnl2_add_queue_groups
> > *ptp_queue_grps_info,
> > + uint8_t *ptp_queue_grps_out);
> > #endif /* _IDPF_COMMON_VIRTCHNL_H_ */ diff --git
> > a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
> > index 7076759024..aa67f7ee27
> > 100644
> > --- a/drivers/common/idpf/version.map
> > +++ b/drivers/common/idpf/version.map
> > @@ -48,6 +48,8 @@ INTERNAL {
> > idpf_vc_irq_map_unmap_config;
> > idpf_vc_one_msg_read;
> > idpf_vc_ptype_info_query;
> > + idpf_vc_queue_grps_add;
> > + idpf_vc_queue_grps_del;
> > idpf_vc_queue_switch;
> > idpf_vc_queues_ena_dis;
> > idpf_vc_rss_hash_get;
> > --
> > 2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH 04/10] net/cpfl: add haipin queue group during vpotr init
2023-04-24 8:55 ` Liu, Mingxia
@ 2023-05-19 5:36 ` Xing, Beilei
0 siblings, 0 replies; 164+ messages in thread
From: Xing, Beilei @ 2023-05-19 5:36 UTC (permalink / raw)
To: Liu, Mingxia, Wu, Jingjing; +Cc: dev
> -----Original Message-----
> From: Liu, Mingxia <mingxia.liu@intel.com>
> Sent: Monday, April 24, 2023 4:55 PM
> To: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [PATCH 04/10] net/cpfl: add haipin queue group during vpotr init
>
>
>
> > -----Original Message-----
> > From: Xing, Beilei <beilei.xing@intel.com>
> > Sent: Friday, April 21, 2023 2:51 PM
> > To: Wu, Jingjing <jingjing.wu@intel.com>
> > Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> > <beilei.xing@intel.com>
> > Subject: [PATCH 04/10] net/cpfl: add haipin queue group during vpotr
> > init
> [Liu, Mingxia] vpotr, spelling error?
Good catch, thanks.
> >
> > From: Beilei Xing <beilei.xing@intel.com>
> >
> > This patch adds haipin queue group during vpotr init.
> >
> > Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > ---
> > drivers/net/cpfl/cpfl_ethdev.c | 125
> > +++++++++++++++++++++++++++++++++ drivers/net/cpfl/cpfl_ethdev.h |
> > 17 +++++
> > drivers/net/cpfl/cpfl_rxtx.h | 4 ++
> > 3 files changed, 146 insertions(+)
> >
> > diff --git a/drivers/net/cpfl/cpfl_ethdev.c
> > b/drivers/net/cpfl/cpfl_ethdev.c index 114fc18f5f..ad5ddebd3a 100644
> > --- a/drivers/net/cpfl/cpfl_ethdev.c
> > +++ b/drivers/net/cpfl/cpfl_ethdev.c
> > @@ -856,6 +856,20 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
> > return 0;
> > }
> >
> > +static int
> > +cpfl_p2p_queue_grps_del(struct idpf_vport *vport) {
> > + struct virtchnl2_queue_group_id
> > qg_ids[CPFL_P2P_NB_QUEUE_GRPS] = {0};
> > + int ret = 0;
> > +
> > + qg_ids[0].queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
> > + qg_ids[0].queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
> > + ret = idpf_vc_queue_grps_del(vport, CPFL_P2P_NB_QUEUE_GRPS,
> > qg_ids);
> > + if (ret)
> > + PMD_DRV_LOG(ERR, "Failed to delete p2p queue groups");
> > + return ret;
> > +}
> > +
> > static int
> > cpfl_dev_close(struct rte_eth_dev *dev) { @@ -864,6 +878,9 @@
> > cpfl_dev_close(struct rte_eth_dev *dev)
> > struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport-
> > >adapter);
> >
> > cpfl_dev_stop(dev);
> > +
> > + cpfl_p2p_queue_grps_del(vport);
> > +
> > idpf_vport_deinit(vport);
> >
> > adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id); @@ -
> > 1350,6 +1367,96 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext
> > *adapter)
> > return vport_idx;
> > }
> >
> > +static int
> > +cpfl_p2p_q_grps_add(struct idpf_vport *vport,
> > + struct virtchnl2_add_queue_groups
> > *p2p_queue_grps_info,
> > + uint8_t *p2p_q_vc_out_info)
> > +{
> > + int ret;
> > +
> > + p2p_queue_grps_info->vport_id = vport->vport_id;
> > + p2p_queue_grps_info->qg_info.num_queue_groups =
> > CPFL_P2P_NB_QUEUE_GRPS;
> > + p2p_queue_grps_info->qg_info.groups[0].num_rx_q =
> > CPFL_MAX_P2P_NB_QUEUES;
> > + p2p_queue_grps_info->qg_info.groups[0].num_rx_bufq =
> > CPFL_P2P_NB_RX_BUFQ;
> > + p2p_queue_grps_info->qg_info.groups[0].num_tx_q =
> > CPFL_MAX_P2P_NB_QUEUES;
> > + p2p_queue_grps_info->qg_info.groups[0].num_tx_complq =
> > CPFL_P2P_NB_TX_COMPLQ;
> > + p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_id =
> > CPFL_P2P_QUEUE_GRP_ID;
> > + p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_type
> > = VIRTCHNL2_QUEUE_GROUP_P2P;
> > + p2p_queue_grps_info-
> > >qg_info.groups[0].rx_q_grp_info.rss_lut_size = 0;
> > + p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.tx_tc = 0;
> > + p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.priority =
> > 0;
> > + p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.is_sp = 0;
> > + p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.pir_weight
> > = 0;
> > +
> > + ret = idpf_vc_queue_grps_add(vport, p2p_queue_grps_info,
> > p2p_q_vc_out_info);
> > + if (ret != 0) {
> > + PMD_DRV_LOG(ERR, "Failed to add p2p queue groups.");
> > + return ret;
> > + }
> > +
> > + return ret;
> > +}
> > +
> > +static int
> > +cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
> > + struct virtchnl2_add_queue_groups
> > *p2p_q_vc_out_info) {
> > + struct p2p_queue_chunks_info *p2p_q_chunks_info =
> > &cpfl_vport->p2p_q_chunks_info;
> > + struct virtchnl2_queue_reg_chunks *vc_chunks_out;
> > + int i, type;
> > +
> > + if (p2p_q_vc_out_info-
> > >qg_info.groups[0].qg_id.queue_group_type !=
> > + VIRTCHNL2_QUEUE_GROUP_P2P) {
> > + PMD_DRV_LOG(ERR, "Add queue group response
> > mismatch.");
> > + return -EINVAL;
> > + }
> > +
> > + vc_chunks_out = &p2p_q_vc_out_info->qg_info.groups[0].chunks;
> > +
> > + for (i = 0; i < vc_chunks_out->num_chunks; i++) {
> > + type = vc_chunks_out->chunks[i].type;
> > + switch (type) {
> > + case VIRTCHNL2_QUEUE_TYPE_TX:
> > + p2p_q_chunks_info->tx_start_qid =
> > + vc_chunks_out->chunks[i].start_queue_id;
> > + p2p_q_chunks_info->tx_qtail_start =
> > + vc_chunks_out->chunks[i].qtail_reg_start;
> > + p2p_q_chunks_info->tx_qtail_spacing =
> > + vc_chunks_out->chunks[i].qtail_reg_spacing;
> > + break;
> > + case VIRTCHNL2_QUEUE_TYPE_RX:
> > + p2p_q_chunks_info->rx_start_qid =
> > + vc_chunks_out->chunks[i].start_queue_id;
> > + p2p_q_chunks_info->rx_qtail_start =
> > + vc_chunks_out->chunks[i].qtail_reg_start;
> > + p2p_q_chunks_info->rx_qtail_spacing =
> > + vc_chunks_out->chunks[i].qtail_reg_spacing;
> > + break;
> > + case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
> > + p2p_q_chunks_info->tx_compl_start_qid =
> > + vc_chunks_out->chunks[i].start_queue_id;
> > + p2p_q_chunks_info->tx_compl_qtail_start =
> > + vc_chunks_out->chunks[i].qtail_reg_start;
> > + p2p_q_chunks_info->tx_compl_qtail_spacing =
> > + vc_chunks_out->chunks[i].qtail_reg_spacing;
> > + break;
> > + case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
> > + p2p_q_chunks_info->rx_buf_start_qid =
> > + vc_chunks_out->chunks[i].start_queue_id;
> > + p2p_q_chunks_info->rx_buf_qtail_start =
> > + vc_chunks_out->chunks[i].qtail_reg_start;
> > + p2p_q_chunks_info->rx_buf_qtail_spacing =
> > + vc_chunks_out->chunks[i].qtail_reg_spacing;
> > + break;
> > + default:
> > + PMD_DRV_LOG(ERR, "Unsupported queue type");
> > + break;
> > + }
> > + }
> > +
> > + return 0;
> > +}
> > +
> > static int
> > cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params) { @@
> > -
> > 1359,6 +1466,8 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void
> > *init_params)
> > struct cpfl_adapter_ext *adapter = param->adapter;
> > /* for sending create vport virtchnl msg prepare */
> > struct virtchnl2_create_vport create_vport_info;
> > + struct virtchnl2_add_queue_groups p2p_queue_grps_info;
> > + uint8_t p2p_q_vc_out_info[IDPF_DFLT_MBX_BUF_SIZE] = {0};
> > int ret = 0;
> >
> > dev->dev_ops = &cpfl_eth_dev_ops;
> > @@ -1380,6 +1489,19 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev,
> > void *init_params)
> > goto err;
> > }
> >
> > + memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
> > + ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info,
> > p2p_q_vc_out_info);
> > + if (ret != 0) {
> > + PMD_INIT_LOG(ERR, "Failed to add p2p queue group.");
> > + goto err_q_grps_add;
> > + }
> > + ret = cpfl_p2p_queue_info_init(cpfl_vport,
> > + (struct virtchnl2_add_queue_groups
> > *)p2p_q_vc_out_info);
> > + if (ret != 0) {
> > + PMD_INIT_LOG(ERR, "Failed to init p2p queue info.");
> > + goto err_p2p_qinfo_init;
> > + }
> > +
> > adapter->vports[param->idx] = cpfl_vport;
> > adapter->cur_vports |= RTE_BIT32(param->devarg_id);
> > adapter->cur_vport_nb++;
> > @@ -1397,6 +1519,9 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev,
> > void *init_params)
> > return 0;
> >
> > err_mac_addrs:
> > +err_p2p_qinfo_init:
> > + cpfl_p2p_queue_grps_del(vport);
> > +err_q_grps_add:
> > adapter->vports[param->idx] = NULL; /* reset */
> > idpf_vport_deinit(vport);
> > adapter->cur_vports &= ~RTE_BIT32(param->devarg_id); diff --git
> > a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
> > index 81fe9ac4c3..5e2e7a1bfb 100644
> > --- a/drivers/net/cpfl/cpfl_ethdev.h
> > +++ b/drivers/net/cpfl/cpfl_ethdev.h
> > @@ -69,8 +69,25 @@ struct cpfl_devargs {
> > uint16_t req_vport_nb;
> > };
> >
> > +struct p2p_queue_chunks_info {
> > + uint32_t tx_start_qid;
> > + uint32_t rx_start_qid;
> > + uint32_t tx_compl_start_qid;
> > + uint32_t rx_buf_start_qid;
> > +
> > + uint64_t tx_qtail_start;
> > + uint32_t tx_qtail_spacing;
> > + uint64_t rx_qtail_start;
> > + uint32_t rx_qtail_spacing;
> > + uint64_t tx_compl_qtail_start;
> > + uint32_t tx_compl_qtail_spacing;
> > + uint64_t rx_buf_qtail_start;
> > + uint32_t rx_buf_qtail_spacing;
> > +};
> > +
> > struct cpfl_vport {
> > struct idpf_vport base;
> > + struct p2p_queue_chunks_info p2p_q_chunks_info;
> > };
> >
> > struct cpfl_adapter_ext {
> > diff --git a/drivers/net/cpfl/cpfl_rxtx.h
> > b/drivers/net/cpfl/cpfl_rxtx.h index
> > b2b3537d10..3a87a1f4b3 100644
> > --- a/drivers/net/cpfl/cpfl_rxtx.h
> > +++ b/drivers/net/cpfl/cpfl_rxtx.h
> > @@ -17,6 +17,10 @@
> > #define CPFL_MAX_HAIRPINQ_TX_2_RX 1
> > #define CPFL_MAX_HAIRPINQ_NB_DESC 1024
> > #define CPFL_MAX_P2P_NB_QUEUES 16
> > +#define CPFL_P2P_NB_RX_BUFQ 1
> > +#define CPFL_P2P_NB_TX_COMPLQ 1
> > +#define CPFL_P2P_NB_QUEUE_GRPS 1
> > +#define CPFL_P2P_QUEUE_GRP_ID 1
> > /* Base address of the HW descriptor ring should be 128B aligned. */
> > #define CPFL_RING_BASE_ALIGN 128
> >
> > --
> > 2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH 06/10] net/cpfl: support hairpin queue configuration
2023-04-24 9:48 ` Liu, Mingxia
@ 2023-05-19 5:43 ` Xing, Beilei
0 siblings, 0 replies; 164+ messages in thread
From: Xing, Beilei @ 2023-05-19 5:43 UTC (permalink / raw)
To: Liu, Mingxia, Wu, Jingjing; +Cc: dev, Wang, Xiao W
> -----Original Message-----
> From: Liu, Mingxia <mingxia.liu@intel.com>
> Sent: Monday, April 24, 2023 5:48 PM
> To: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Wang, Xiao W <xiao.w.wang@intel.com>
> Subject: RE: [PATCH 06/10] net/cpfl: support hairpin queue configuration
>
>
>
> > -----Original Message-----
> > From: Xing, Beilei <beilei.xing@intel.com>
> > Sent: Friday, April 21, 2023 2:51 PM
> > To: Wu, Jingjing <jingjing.wu@intel.com>
> > Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> > <beilei.xing@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>
> > Subject: [PATCH 06/10] net/cpfl: support hairpin queue configuration
> >
> > From: Beilei Xing <beilei.xing@intel.com>
> >
> > This patch supports Rx/Tx hairpin queue configuration.
> >
> > Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> > Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > ---
> > drivers/common/idpf/idpf_common_virtchnl.c | 70 +++++++++++
> > drivers/common/idpf/idpf_common_virtchnl.h | 6 +
> > drivers/common/idpf/version.map | 2 +
> > drivers/net/cpfl/cpfl_ethdev.c | 136 ++++++++++++++++++++-
> > drivers/net/cpfl/cpfl_rxtx.c | 80 ++++++++++++
> > drivers/net/cpfl/cpfl_rxtx.h | 7 ++
> > 6 files changed, 297 insertions(+), 4 deletions(-)
> >
> > diff --git a/drivers/common/idpf/idpf_common_virtchnl.c
> > b/drivers/common/idpf/idpf_common_virtchnl.c
> > index 76a658bb26..50cd43a8dd 100644
> > --- a/drivers/common/idpf/idpf_common_virtchnl.c
> > +++ b/drivers/common/idpf/idpf_common_virtchnl.c
<...>
> > static int
> > cpfl_start_queues(struct rte_eth_dev *dev) {
> > + struct cpfl_vport *cpfl_vport = dev->data->dev_private;
> > + struct idpf_vport *vport = &cpfl_vport->base;
> > struct cpfl_rx_queue *cpfl_rxq;
> > struct cpfl_tx_queue *cpfl_txq;
> > + int tx_cmplq_flag = 0;
> > + int rx_bufq_flag = 0;
> > + int flag = 0;
> > int err = 0;
> > int i;
> >
> > + /* For normal data queues, configure, init and enale Txq.
> > + * For non-cross vport hairpin queues, configure Txq.
> > + */
> > for (i = 0; i < dev->data->nb_tx_queues; i++) {
> > cpfl_txq = dev->data->tx_queues[i];
> > if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
> > continue;
> > - err = cpfl_tx_queue_start(dev, i);
> > + if (!cpfl_txq->hairpin_info.hairpin_q) {
> > + err = cpfl_tx_queue_start(dev, i);
> > + if (err != 0) {
> > + PMD_DRV_LOG(ERR, "Fail to start Tx
> > queue %u", i);
> > + return err;
> > + }
> > + } else if (!cpfl_txq->hairpin_info.manual_bind) {
> > + if (flag == 0) {
> > + err = cpfl_txq_hairpin_info_update(dev,
> > + cpfl_txq-
> > >hairpin_info.peer_rxp);
> > + if (err != 0) {
> > + PMD_DRV_LOG(ERR, "Fail to update
> Tx
> > hairpin queue info");
> > + return err;
> > + }
> > + flag = 1;
> [Liu, Mingxia] The variable flag is not being used; can it be removed?
It's used in the above code; txq_hairpin_info should only be updated once.
> > + }
> > + err = cpfl_hairpin_txq_config(vport, cpfl_txq);
> > + if (err != 0) {
> > + PMD_DRV_LOG(ERR, "Fail to configure hairpin
> > Tx queue %u", i);
> > + return err;
> > + }
> > + tx_cmplq_flag = 1;
> > + }
> > + }
> > +
>
> > + /* For non-cross vport hairpin queues, configure Tx completion queue
> > first.*/
> > + if (tx_cmplq_flag == 1 && cpfl_vport->p2p_tx_complq != NULL) {
> > + err = cpfl_hairpin_tx_complq_config(cpfl_vport);
> > if (err != 0) {
> > - PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
> > + PMD_DRV_LOG(ERR, "Fail to config Tx completion
> > queue");
> > return err;
> > }
> > }
> >
> [Liu, Mingxia] Better to move this code next to
> + err = cpfl_hairpin_txq_config(vport, cpfl_txq);
> + if (err != 0) {
> + PMD_DRV_LOG(ERR, "Fail to configure hairpin
> Tx queue %u", i);
> + return err;
> + }
> When cpfl_rxq->hairpin_info.hairpin_q is true, then
> cpfl_vport->p2p_tx_complq is not null, right?
> And remove tx_cmplq_flag?
The hairpin Tx completion queue should only be configured once, so it should not be in the for loop.
However, the code is refined in v2.
>
> > + /* For normal data queues, configure, init and enale Rxq.
> > + * For non-cross vport hairpin queues, configure Rxq, and then init Rxq.
> > + */
> > + cpfl_rxq_hairpin_mz_bind(dev);
> > for (i = 0; i < dev->data->nb_rx_queues; i++) {
> > cpfl_rxq = dev->data->rx_queues[i];
> > if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
> > continue;
> > - err = cpfl_rx_queue_start(dev, i);
> > + if (!cpfl_rxq->hairpin_info.hairpin_q) {
> > + err = cpfl_rx_queue_start(dev, i);
> > + if (err != 0) {
> > + PMD_DRV_LOG(ERR, "Fail to start Rx
> > queue %u", i);
> > + return err;
> > + }
> > + } else if (!cpfl_rxq->hairpin_info.manual_bind) {
> > + err = cpfl_hairpin_rxq_config(vport, cpfl_rxq);
> > + if (err != 0) {
> > + PMD_DRV_LOG(ERR, "Fail to configure hairpin
> > Rx queue %u", i);
> > + return err;
> > + }
> > + err = cpfl_rx_queue_init(dev, i);
> > + if (err != 0) {
> > + PMD_DRV_LOG(ERR, "Fail to init hairpin Rx
> > queue %u", i);
> > + return err;
> > + }
> > + rx_bufq_flag = 1;
> > + }
> > + }
> > +
>
> > + /* For non-cross vport hairpin queues, configure Rx buffer queue.*/
> > + if (rx_bufq_flag == 1 && cpfl_vport->p2p_rx_bufq != NULL) {
> > + err = cpfl_hairpin_rx_bufq_config(cpfl_vport);
> > if (err != 0) {
> > - PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
> > + PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
> > return err;
> > }
> > }
> [Liu, Mingxia] Similar to above.
>
> > diff --git a/drivers/net/cpfl/cpfl_rxtx.c
> > b/drivers/net/cpfl/cpfl_rxtx.c index 64ed331a6d..040beb5bac 100644
> > --- a/drivers/net/cpfl/cpfl_rxtx.c
> > +++ b/drivers/net/cpfl/cpfl_rxtx.c
> > @@ -930,6 +930,86 @@ cpfl_tx_hairpin_queue_setup(struct rte_eth_dev
> > *dev, uint16_t queue_idx,
> > return 0;
> > }
> >
> > +int
> > +cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport) {
> > + struct idpf_rx_queue *rx_bufq = cpfl_vport->p2p_rx_bufq;
> > + struct virtchnl2_rxq_info rxq_info[1] = {0};
> > +
> > + rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
> > + rxq_info[0].queue_id = rx_bufq->queue_id;
> > + rxq_info[0].ring_len = rx_bufq->nb_rx_desc;
> > + rxq_info[0].dma_ring_addr = rx_bufq->rx_ring_phys_addr;
> > + rxq_info[0].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
> > + rxq_info[0].rx_buffer_low_watermark =
> > CPFL_RXBUF_LOW_WATERMARK;
> > + rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
> > + rxq_info[0].data_buffer_size = rx_bufq->rx_buf_len;
> > + rxq_info[0].buffer_notif_stride = CPFL_RX_BUF_STRIDE;
> > +
> > + return idpf_vc_rxq_config_by_info(&cpfl_vport->base, rxq_info, 1); }
> > +
> > +int
> > +cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct
> > +cpfl_rx_queue
> > +*cpfl_rxq) {
> > + struct virtchnl2_rxq_info rxq_info[1] = {0};
> > + struct idpf_rx_queue *rxq = &cpfl_rxq->base;
> > +
> > + rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX;
> > + rxq_info[0].queue_id = rxq->queue_id;
> > + rxq_info[0].ring_len = rxq->nb_rx_desc;
> > + rxq_info[0].dma_ring_addr = rxq->rx_ring_phys_addr;
> > + rxq_info[0].rx_bufq1_id = rxq->bufq1->queue_id;
> > + rxq_info[0].max_pkt_size = vport->max_pkt_len;
> > + rxq_info[0].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
> > + rxq_info[0].qflags |= VIRTCHNL2_RX_DESC_SIZE_16BYTE;
> > +
> > + rxq_info[0].data_buffer_size = rxq->rx_buf_len;
> > + rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
> > + rxq_info[0].rx_buffer_low_watermark =
> > CPFL_RXBUF_LOW_WATERMARK;
> > +
> > + PMD_DRV_LOG(NOTICE, "hairpin: vport %u, Rxq id 0x%x",
> > + vport->vport_id, rxq_info[0].queue_id);
> > +
> > + return idpf_vc_rxq_config_by_info(vport, rxq_info, 1); }
> > +
> > +int
> > +cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport) {
> > + struct idpf_tx_queue *tx_complq = cpfl_vport->p2p_tx_complq;
> > + struct virtchnl2_txq_info txq_info[1] = {0};
> > +
> > + txq_info[0].dma_ring_addr = tx_complq->tx_ring_phys_addr;
> > + txq_info[0].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
> > + txq_info[0].queue_id = tx_complq->queue_id;
> > + txq_info[0].ring_len = tx_complq->nb_tx_desc;
> > + txq_info[0].peer_rx_queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
> > + txq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
> > + txq_info[0].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
> > +
> > + return idpf_vc_txq_config_by_info(&cpfl_vport->base, txq_info, 1); }
> > +
> > +int
> > +cpfl_hairpin_txq_config(struct idpf_vport *vport, struct
> > +cpfl_tx_queue
> > +*cpfl_txq) {
> > + struct idpf_tx_queue *txq = &cpfl_txq->base;
> > + struct virtchnl2_txq_info txq_info[1] = {0};
> > +
> > + txq_info[0].dma_ring_addr = txq->tx_ring_phys_addr;
> > + txq_info[0].type = VIRTCHNL2_QUEUE_TYPE_TX;
> > + txq_info[0].queue_id = txq->queue_id;
> > + txq_info[0].ring_len = txq->nb_tx_desc;
> > + txq_info[0].tx_compl_queue_id = txq->complq->queue_id;
> > + txq_info[0].relative_queue_id = txq->queue_id;
> > + txq_info[0].peer_rx_queue_id = cpfl_txq->hairpin_info.peer_rxq_id;
> > + txq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
> > + txq_info[0].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
> > +
> > + return idpf_vc_txq_config_by_info(vport, txq_info, 1); }
> > +
> > int
> > cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id) {
> > diff --git a/drivers/net/cpfl/cpfl_rxtx.h
> > b/drivers/net/cpfl/cpfl_rxtx.h index
> > d844c9f057..b01ce5edf9 100644
> > --- a/drivers/net/cpfl/cpfl_rxtx.h
> > +++ b/drivers/net/cpfl/cpfl_rxtx.h
> > @@ -30,12 +30,15 @@
> > #define CPFL_RING_BASE_ALIGN 128
> >
> > #define CPFL_DEFAULT_RX_FREE_THRESH 32
> > +#define CPFL_RXBUF_LOW_WATERMARK 64
> >
> > #define CPFL_DEFAULT_TX_RS_THRESH 32
> > #define CPFL_DEFAULT_TX_FREE_THRESH 32
> >
> > #define CPFL_SUPPORT_CHAIN_NUM 5
> >
> > +#define CPFL_RX_BUF_STRIDE 64
> > +
> > struct cpfl_rxq_hairpin_info {
> > bool hairpin_q; /* if rx queue is a hairpin queue */
> > bool manual_bind; /* for cross vport */
> > @@ -85,4 +88,8 @@ int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev
> > *dev, uint16_t queue_idx, int cpfl_tx_hairpin_queue_setup(struct
> > rte_eth_dev *dev, uint16_t queue_idx,
> > uint16_t nb_desc,
> > const struct rte_eth_hairpin_conf *conf);
> > +int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport); int
> > +cpfl_hairpin_txq_config(struct idpf_vport *vport, struct
> > +cpfl_tx_queue *cpfl_txq); int cpfl_hairpin_rx_bufq_config(struct
> > +cpfl_vport *cpfl_vport); int cpfl_hairpin_rxq_config(struct
> > +idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
> > #endif /* _CPFL_RXTX_H_ */
> > --
> > 2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v3 00/10] net/cpfl: add hairpin queue support
2023-05-19 5:10 ` [PATCH v2 00/10] add hairpin queue support beilei.xing
` (10 preceding siblings ...)
2023-05-19 5:10 ` [PATCH v2 10/10] net/cpfl: support hairpin bind/unbind beilei.xing
@ 2023-05-19 7:31 ` beilei.xing
2023-05-19 7:31 ` [PATCH v3 01/10] net/cpfl: refine structures beilei.xing
` (10 more replies)
11 siblings, 11 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-19 7:31 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patchset adds hairpin queue support.
v2 changes:
- change hairpin Rx queues configuration sequence.
- code refinement.
v3 changes:
- Refine the patchset based on the latest code.
Beilei Xing (10):
net/cpfl: refine structures
net/cpfl: support hairpin queue capbility get
common/idpf: support queue groups add/delete
net/cpfl: add haipin queue group during vport init
net/cpfl: support hairpin queue setup and release
net/cpfl: support hairpin queue configuration
net/cpfl: support hairpin queue start/stop
net/cpfl: enable write back based on ITR expire
net/cpfl: support peer ports get
net/cpfl: support hairpin bind/unbind
drivers/common/idpf/idpf_common_device.c | 75 ++
drivers/common/idpf/idpf_common_device.h | 4 +
drivers/common/idpf/idpf_common_virtchnl.c | 138 +++-
drivers/common/idpf/idpf_common_virtchnl.h | 18 +
drivers/common/idpf/version.map | 6 +
drivers/net/cpfl/cpfl_ethdev.c | 591 +++++++++++++--
drivers/net/cpfl/cpfl_ethdev.h | 36 +-
drivers/net/cpfl/cpfl_rxtx.c | 807 +++++++++++++++++++--
drivers/net/cpfl/cpfl_rxtx.h | 65 ++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 21 +-
10 files changed, 1642 insertions(+), 119 deletions(-)
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v3 01/10] net/cpfl: refine structures
2023-05-19 7:31 ` [PATCH v3 00/10] net/cpfl: add hairpin queue support beilei.xing
@ 2023-05-19 7:31 ` beilei.xing
2023-05-19 7:31 ` [PATCH v3 02/10] net/cpfl: support hairpin queue capbility get beilei.xing
` (9 subsequent siblings)
10 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-19 7:31 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch refines some structures to support hairpin queue,
cpfl_rx_queue/cpfl_tx_queue/cpfl_vport.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 85 +++++++-----
drivers/net/cpfl/cpfl_ethdev.h | 6 +-
drivers/net/cpfl/cpfl_rxtx.c | 175 +++++++++++++++++-------
drivers/net/cpfl/cpfl_rxtx.h | 8 ++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 17 +--
5 files changed, 196 insertions(+), 95 deletions(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 7528a14d05..e587155db6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -124,7 +124,8 @@ static int
cpfl_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct rte_eth_link new_link;
unsigned int i;
@@ -156,7 +157,8 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
static int
cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
dev_info->max_rx_queues = base->caps.max_rx_q;
@@ -216,7 +218,8 @@ cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
static int
cpfl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
/* mtu setting is forbidden if port is start */
if (dev->data->dev_started) {
@@ -256,12 +259,12 @@ static uint64_t
cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
{
uint64_t mbuf_alloc_failed = 0;
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i = 0;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- mbuf_alloc_failed += __atomic_load_n(&rxq->rx_stats.mbuf_alloc_failed,
+ cpfl_rxq = dev->data->rx_queues[i];
+ mbuf_alloc_failed += __atomic_load_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,
__ATOMIC_RELAXED);
}
@@ -271,8 +274,8 @@ cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
static int
cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
int ret;
@@ -305,20 +308,20 @@ cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
static void
cpfl_reset_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- __atomic_store_n(&rxq->rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
+ cpfl_rxq = dev->data->rx_queues[i];
+ __atomic_store_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
}
}
static int
cpfl_dev_stats_reset(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
int ret;
@@ -343,8 +346,8 @@ static int cpfl_dev_xstats_reset(struct rte_eth_dev *dev)
static int cpfl_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned int n)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
unsigned int i;
int ret;
@@ -459,7 +462,8 @@ cpfl_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t idx, shift;
int ret = 0;
@@ -498,7 +502,8 @@ cpfl_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t idx, shift;
int ret = 0;
@@ -536,7 +541,8 @@ static int
cpfl_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
int ret = 0;
@@ -601,7 +607,8 @@ static int
cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
int ret = 0;
@@ -638,7 +645,8 @@ cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,
static int
cpfl_dev_configure(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct rte_eth_conf *conf = &dev->data->dev_conf;
struct idpf_adapter *base = vport->adapter;
int ret;
@@ -710,7 +718,8 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
static int
cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
uint16_t nb_rx_queues = dev->data->nb_rx_queues;
return idpf_vport_irq_map_config(vport, nb_rx_queues);
@@ -719,14 +728,14 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
static int
cpfl_start_queues(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
- struct idpf_tx_queue *txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
int err = 0;
int i;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL || txq->tx_deferred_start)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
continue;
err = cpfl_tx_queue_start(dev, i);
if (err != 0) {
@@ -736,8 +745,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- if (rxq == NULL || rxq->rx_deferred_start)
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
continue;
err = cpfl_rx_queue_start(dev, i);
if (err != 0) {
@@ -752,7 +761,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)
static int
cpfl_dev_start(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(base);
uint16_t num_allocated_vectors = base->caps.num_allocated_vectors;
@@ -813,7 +823,8 @@ cpfl_dev_start(struct rte_eth_dev *dev)
static int
cpfl_dev_stop(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
if (dev->data->dev_started == 0)
return 0;
@@ -832,7 +843,8 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
static int
cpfl_dev_close(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
@@ -842,7 +854,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
adapter->cur_vport_nb--;
dev->data->dev_private = NULL;
adapter->vports[vport->sw_idx] = NULL;
- rte_free(vport);
+ rte_free(cpfl_vport);
return 0;
}
@@ -1047,7 +1059,7 @@ cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id)
int i;
for (i = 0; i < adapter->cur_vport_nb; i++) {
- vport = adapter->vports[i];
+ vport = &adapter->vports[i]->base;
if (vport->vport_id != vport_id)
continue;
else
@@ -1275,7 +1287,8 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter)
static int
cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_vport_param *param = init_params;
struct cpfl_adapter_ext *adapter = param->adapter;
/* for sending create vport virtchnl msg prepare */
@@ -1300,7 +1313,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
goto err;
}
- adapter->vports[param->idx] = vport;
+ adapter->vports[param->idx] = cpfl_vport;
adapter->cur_vports |= RTE_BIT32(param->devarg_id);
adapter->cur_vport_nb++;
@@ -1415,7 +1428,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
snprintf(name, sizeof(name), "cpfl_%s_vport_0",
pci_dev->device.name);
retval = rte_eth_dev_create(&pci_dev->device, name,
- sizeof(struct idpf_vport),
+ sizeof(struct cpfl_vport),
NULL, NULL, cpfl_dev_vport_init,
&vport_param);
if (retval != 0)
@@ -1433,7 +1446,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
pci_dev->device.name,
devargs.req_vports[i]);
retval = rte_eth_dev_create(&pci_dev->device, name,
- sizeof(struct idpf_vport),
+ sizeof(struct cpfl_vport),
NULL, NULL, cpfl_dev_vport_init,
&vport_param);
if (retval != 0)
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 200dfcac02..81fe9ac4c3 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -69,13 +69,17 @@ struct cpfl_devargs {
uint16_t req_vport_nb;
};
+struct cpfl_vport {
+ struct idpf_vport base;
+};
+
struct cpfl_adapter_ext {
TAILQ_ENTRY(cpfl_adapter_ext) next;
struct idpf_adapter base;
char name[CPFL_ADAPTER_NAME_LEN];
- struct idpf_vport **vports;
+ struct cpfl_vport **vports;
uint16_t max_vport_nb;
uint16_t cur_vports; /* bit mask of created vport */
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 75021c3c54..04a51b8d15 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -128,7 +128,8 @@ cpfl_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,
uint16_t nb_desc, unsigned int socket_id,
struct rte_mempool *mp, uint8_t bufq_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct idpf_hw *hw = &base->hw;
const struct rte_memzone *mz;
@@ -220,15 +221,69 @@ cpfl_rx_split_bufq_release(struct idpf_rx_queue *bufq)
rte_free(bufq);
}
+static void
+cpfl_rx_queue_release(void *rxq)
+{
+ struct cpfl_rx_queue *cpfl_rxq = rxq;
+ struct idpf_rx_queue *q = NULL;
+
+ if (cpfl_rxq == NULL)
+ return;
+
+ q = &cpfl_rxq->base;
+
+ /* Split queue */
+ if (!q->adapter->is_rx_singleq) {
+ if (q->bufq2)
+ cpfl_rx_split_bufq_release(q->bufq2);
+
+ if (q->bufq1)
+ cpfl_rx_split_bufq_release(q->bufq1);
+
+ rte_free(cpfl_rxq);
+ return;
+ }
+
+ /* Single queue */
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(cpfl_rxq);
+}
+
+static void
+cpfl_tx_queue_release(void *txq)
+{
+ struct cpfl_tx_queue *cpfl_txq = txq;
+ struct idpf_tx_queue *q = NULL;
+
+ if (cpfl_txq == NULL)
+ return;
+
+ q = &cpfl_txq->base;
+
+ if (q->complq) {
+ rte_memzone_free(q->complq->mz);
+ rte_free(q->complq);
+ }
+
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(cpfl_txq);
+}
+
int
cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct idpf_hw *hw = &base->hw;
+ struct cpfl_rx_queue *cpfl_rxq;
const struct rte_memzone *mz;
struct idpf_rx_queue *rxq;
uint16_t rx_free_thresh;
@@ -248,21 +303,23 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Free memory if needed */
if (dev->data->rx_queues[queue_idx] != NULL) {
- idpf_qc_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
dev->data->rx_queues[queue_idx] = NULL;
}
/* Setup Rx queue */
- rxq = rte_zmalloc_socket("cpfl rxq",
- sizeof(struct idpf_rx_queue),
+ cpfl_rxq = rte_zmalloc_socket("cpfl rxq",
+ sizeof(struct cpfl_rx_queue),
RTE_CACHE_LINE_SIZE,
socket_id);
- if (rxq == NULL) {
+ if (cpfl_rxq == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
ret = -ENOMEM;
goto err_rxq_alloc;
}
+ rxq = &cpfl_rxq->base;
+
is_splitq = !!(vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
rxq->mp = mp;
@@ -329,7 +386,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
rxq->q_set = true;
- dev->data->rx_queues[queue_idx] = rxq;
+ dev->data->rx_queues[queue_idx] = cpfl_rxq;
return 0;
@@ -349,7 +406,8 @@ cpfl_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq,
uint16_t queue_idx, uint16_t nb_desc,
unsigned int socket_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
const struct rte_memzone *mz;
struct idpf_tx_queue *cq;
int ret;
@@ -397,9 +455,11 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t tx_rs_thresh, tx_free_thresh;
+ struct cpfl_tx_queue *cpfl_txq;
struct idpf_hw *hw = &base->hw;
const struct rte_memzone *mz;
struct idpf_tx_queue *txq;
@@ -419,21 +479,23 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Free memory if needed. */
if (dev->data->tx_queues[queue_idx] != NULL) {
- idpf_qc_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
dev->data->tx_queues[queue_idx] = NULL;
}
/* Allocate the TX queue data structure. */
- txq = rte_zmalloc_socket("cpfl txq",
- sizeof(struct idpf_tx_queue),
+ cpfl_txq = rte_zmalloc_socket("cpfl txq",
+ sizeof(struct cpfl_tx_queue),
RTE_CACHE_LINE_SIZE,
socket_id);
- if (txq == NULL) {
+ if (cpfl_txq == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
ret = -ENOMEM;
goto err_txq_alloc;
}
+ txq = &cpfl_txq->base;
+
is_splitq = !!(vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
txq->nb_tx_desc = nb_desc;
@@ -487,7 +549,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
queue_idx * vport->chunks_info.tx_qtail_spacing);
txq->ops = &def_txq_ops;
txq->q_set = true;
- dev->data->tx_queues[queue_idx] = txq;
+ dev->data->tx_queues[queue_idx] = cpfl_txq;
return 0;
@@ -503,6 +565,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
+ struct cpfl_rx_queue *cpfl_rxq;
struct idpf_rx_queue *rxq;
uint16_t max_pkt_len;
uint32_t frame_size;
@@ -511,7 +574,8 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
if (rx_queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
- rxq = dev->data->rx_queues[rx_queue_id];
+ cpfl_rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = &cpfl_rxq->base;
if (rxq == NULL || !rxq->q_set) {
PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
@@ -575,9 +639,10 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_rx_queue *rxq =
- dev->data->rx_queues[rx_queue_id];
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq = dev->data->rx_queues[rx_queue_id];
+ struct idpf_rx_queue *rxq = &cpfl_rxq->base;
int err = 0;
err = idpf_vc_rxq_config(vport, rxq);
@@ -610,15 +675,15 @@ cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
if (tx_queue_id >= dev->data->nb_tx_queues)
return -EINVAL;
- txq = dev->data->tx_queues[tx_queue_id];
+ cpfl_txq = dev->data->tx_queues[tx_queue_id];
/* Init the TX tail register. */
- IDPF_PCI_REG_WRITE(txq->qtx_tail, 0);
+ IDPF_PCI_REG_WRITE(cpfl_txq->base.qtx_tail, 0);
return 0;
}
@@ -626,12 +691,13 @@ cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
int
cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_tx_queue *txq =
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_tx_queue *cpfl_txq =
dev->data->tx_queues[tx_queue_id];
int err = 0;
- err = idpf_vc_txq_config(vport, txq);
+ err = idpf_vc_txq_config(vport, &cpfl_txq->base);
if (err != 0) {
PMD_DRV_LOG(ERR, "Fail to configure Tx queue %u", tx_queue_id);
return err;
@@ -650,7 +716,7 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
tx_queue_id);
} else {
- txq->q_started = true;
+ cpfl_txq->base.q_started = true;
dev->data->tx_queue_state[tx_queue_id] =
RTE_ETH_QUEUE_STATE_STARTED;
}
@@ -661,13 +727,16 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
int
cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq;
struct idpf_rx_queue *rxq;
int err;
if (rx_queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
+ cpfl_rxq = dev->data->rx_queues[rx_queue_id];
err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
@@ -675,7 +744,7 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return err;
}
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = &cpfl_rxq->base;
rxq->q_started = false;
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
rxq->ops->release_mbufs(rxq);
@@ -693,13 +762,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_tx_queue *cpfl_txq;
struct idpf_tx_queue *txq;
int err;
if (tx_queue_id >= dev->data->nb_tx_queues)
return -EINVAL;
+ cpfl_txq = dev->data->tx_queues[tx_queue_id];
+
err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
@@ -707,7 +780,7 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
return err;
}
- txq = dev->data->tx_queues[tx_queue_id];
+ txq = &cpfl_txq->base;
txq->q_started = false;
txq->ops->release_mbufs(txq);
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
@@ -724,25 +797,25 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
void
cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- idpf_qc_rx_queue_release(dev->data->rx_queues[qid]);
+ cpfl_rx_queue_release(dev->data->rx_queues[qid]);
}
void
cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- idpf_qc_tx_queue_release(dev->data->tx_queues[qid]);
+ cpfl_tx_queue_release(dev->data->tx_queues[qid]);
}
void
cpfl_stop_queues(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
- struct idpf_tx_queue *txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
int i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- if (rxq == NULL)
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq == NULL)
continue;
if (cpfl_rx_queue_stop(dev, i) != 0)
@@ -750,8 +823,8 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL)
continue;
if (cpfl_tx_queue_stop(dev, i) != 0)
@@ -762,9 +835,10 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
void
cpfl_set_rx_function(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
#ifdef RTE_ARCH_X86
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i;
if (cpfl_rx_vec_dev_check_default(dev) == CPFL_VECTOR_PATH &&
@@ -790,8 +864,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- (void)idpf_qc_splitq_rx_vec_setup(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ (void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
if (vport->rx_use_avx512) {
@@ -810,8 +884,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
} else {
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- (void)idpf_qc_singleq_rx_vec_setup(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ (void)idpf_qc_singleq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
if (vport->rx_use_avx512) {
@@ -860,10 +934,11 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
void
cpfl_set_tx_function(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
#ifdef RTE_ARCH_X86
#ifdef CC_AVX512_SUPPORT
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
int i;
#endif /* CC_AVX512_SUPPORT */
@@ -878,8 +953,8 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
vport->tx_use_avx512 = true;
if (vport->tx_use_avx512) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- idpf_qc_tx_vec_avx512_setup(txq);
+ cpfl_txq = dev->data->tx_queues[i];
+ idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
}
}
}
@@ -916,10 +991,10 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
#ifdef CC_AVX512_SUPPORT
if (vport->tx_use_avx512) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL)
continue;
- idpf_qc_tx_vec_avx512_setup(txq);
+ idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
}
PMD_DRV_LOG(NOTICE,
"Using Single AVX512 Vector Tx (port %d).",
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index fb267d38c8..bfb9ad97bd 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -23,6 +23,14 @@
#define CPFL_SUPPORT_CHAIN_NUM 5
+struct cpfl_rx_queue {
+ struct idpf_rx_queue base;
+};
+
+struct cpfl_tx_queue {
+ struct idpf_tx_queue base;
+};
+
int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
index 665418d27d..5690b17911 100644
--- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
+++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
@@ -76,15 +76,16 @@ cpfl_rx_splitq_vec_default(struct idpf_rx_queue *rxq)
static inline int
cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_rx_queue *rxq;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq;
int i, default_ret, splitq_ret, ret = CPFL_SCALAR_PATH;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- default_ret = cpfl_rx_vec_queue_default(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
- splitq_ret = cpfl_rx_splitq_vec_default(rxq);
+ splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base);
ret = splitq_ret && default_ret;
} else {
ret = default_ret;
@@ -100,12 +101,12 @@ static inline int
cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)
{
int i;
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
int ret = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- ret = cpfl_tx_vec_queue_default(txq);
+ cpfl_txq = dev->data->tx_queues[i];
+ ret = cpfl_tx_vec_queue_default(&cpfl_txq->base);
if (ret == CPFL_SCALAR_PATH)
return CPFL_SCALAR_PATH;
}
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v3 02/10] net/cpfl: support hairpin queue capability get
2023-05-19 7:31 ` [PATCH v3 00/10] net/cpfl: add hairpin queue support beilei.xing
2023-05-19 7:31 ` [PATCH v3 01/10] net/cpfl: refine structures beilei.xing
@ 2023-05-19 7:31 ` beilei.xing
2023-05-24 14:30 ` Wu, Jingjing
2023-05-19 7:31 ` [PATCH v3 03/10] common/idpf: support queue groups add/delete beilei.xing
` (8 subsequent siblings)
10 siblings, 1 reply; 164+ messages in thread
From: beilei.xing @ 2023-05-19 7:31 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch adds hairpin_cap_get ops support.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
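Not part of the diff: a minimal sketch of how an application could read
back the capability values exposed by this new hairpin_cap_get ops
through the standard ethdev API; error handling is kept to a minimum and
the port id is assumed to be valid.

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: query and print the hairpin capability of a port. */
static int
show_hairpin_cap(uint16_t port_id)
{
	struct rte_eth_hairpin_cap cap;
	int ret;

	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
	if (ret != 0)
		return ret;

	printf("port %u: max_nb_queues=%u rx_2_tx=%u tx_2_rx=%u max_nb_desc=%u\n",
	       port_id, cap.max_nb_queues, cap.max_rx_2_tx,
	       cap.max_tx_2_rx, cap.max_nb_desc);
	return 0;
}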
drivers/net/cpfl/cpfl_ethdev.c | 13 +++++++++++++
drivers/net/cpfl/cpfl_rxtx.h | 4 ++++
2 files changed, 17 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index e587155db6..b6fd0b05d0 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -154,6 +154,18 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
return rte_eth_linkstatus_set(dev, &new_link);
}
+static int
+cpfl_hairpin_cap_get(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_hairpin_cap *cap)
+{
+ cap->max_nb_queues = CPFL_MAX_P2P_NB_QUEUES;
+ cap->max_rx_2_tx = CPFL_MAX_HAIRPINQ_RX_2_TX;
+ cap->max_tx_2_rx = CPFL_MAX_HAIRPINQ_TX_2_RX;
+ cap->max_nb_desc = CPFL_MAX_HAIRPINQ_NB_DESC;
+
+ return 0;
+}
+
static int
cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
@@ -885,6 +897,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.xstats_get = cpfl_dev_xstats_get,
.xstats_get_names = cpfl_dev_xstats_get_names,
.xstats_reset = cpfl_dev_xstats_reset,
+ .hairpin_cap_get = cpfl_hairpin_cap_get,
};
static int
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index bfb9ad97bd..b2b3537d10 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -13,6 +13,10 @@
#define CPFL_MIN_RING_DESC 32
#define CPFL_MAX_RING_DESC 4096
#define CPFL_DMA_MEM_ALIGN 4096
+#define CPFL_MAX_HAIRPINQ_RX_2_TX 1
+#define CPFL_MAX_HAIRPINQ_TX_2_RX 1
+#define CPFL_MAX_HAIRPINQ_NB_DESC 1024
+#define CPFL_MAX_P2P_NB_QUEUES 16
/* Base address of the HW descriptor ring should be 128B aligned. */
#define CPFL_RING_BASE_ALIGN 128
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v3 03/10] common/idpf: support queue groups add/delete
2023-05-19 7:31 ` [PATCH v3 00/10] net/cpfl: add hairpin queue support beilei.xing
2023-05-19 7:31 ` [PATCH v3 01/10] net/cpfl: refine structures beilei.xing
2023-05-19 7:31 ` [PATCH v3 02/10] net/cpfl: support hairpin queue capability get beilei.xing
@ 2023-05-19 7:31 ` beilei.xing
2023-05-19 7:31 ` [PATCH v3 04/10] net/cpfl: add hairpin queue group during vport init beilei.xing
` (7 subsequent siblings)
10 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-19 7:31 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds queue group add/delete virtual channel support.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
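Not part of the diff: a minimal usage sketch of the new delete helper,
mirroring how the cpfl PMD is expected to use it later in this series.
The group id value and the VIRTCHNL2_QUEUE_GROUP_P2P type are
illustrative (they are defined on the cpfl side in a later patch); the
add path works the same way but carries a variable-length group array,
so its message size is sizeof(*info) + (num_groups - 1) * sizeof(group_info).

/* Sketch: delete a single peer-to-peer (P2P) queue group on a vport. */
static int
del_p2p_queue_group(struct idpf_vport *vport)
{
	struct virtchnl2_queue_group_id qg_ids[1] = {0};

	qg_ids[0].queue_group_id = 1;		/* illustrative group id */
	qg_ids[0].queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;

	return idpf_vc_queue_grps_del(vport, 1, qg_ids);
}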
drivers/common/idpf/idpf_common_virtchnl.c | 66 ++++++++++++++++++++++
drivers/common/idpf/idpf_common_virtchnl.h | 9 +++
drivers/common/idpf/version.map | 2 +
3 files changed, 77 insertions(+)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index b713678634..a3fe55c897 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -359,6 +359,72 @@ idpf_vc_vport_destroy(struct idpf_vport *vport)
return err;
}
+int
+idpf_vc_queue_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
+ uint8_t *p2p_queue_grps_out)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_cmd_info args;
+ int size, qg_info_size;
+ int err = -1;
+
+ size = sizeof(*p2p_queue_grps_info) +
+ (p2p_queue_grps_info->qg_info.num_queue_groups - 1) *
+ sizeof(struct virtchnl2_queue_group_info);
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_ADD_QUEUE_GROUPS;
+ args.in_args = (uint8_t *)p2p_queue_grps_info;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ if (err != 0) {
+ DRV_LOG(ERR,
+ "Failed to execute command of VIRTCHNL2_OP_ADD_QUEUE_GROUPS");
+ return err;
+ }
+
+ rte_memcpy(p2p_queue_grps_out, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
+ return 0;
+}
+
+int idpf_vc_queue_grps_del(struct idpf_vport *vport,
+ uint16_t num_q_grps,
+ struct virtchnl2_queue_group_id *qg_ids)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_delete_queue_groups *vc_del_q_grps;
+ struct idpf_cmd_info args;
+ int size;
+ int err;
+
+ size = sizeof(*vc_del_q_grps) +
+ (num_q_grps - 1) * sizeof(struct virtchnl2_queue_group_id);
+ vc_del_q_grps = rte_zmalloc("vc_del_q_grps", size, 0);
+
+ vc_del_q_grps->vport_id = vport->vport_id;
+ vc_del_q_grps->num_queue_groups = num_q_grps;
+ memcpy(vc_del_q_grps->qg_ids, qg_ids,
+ num_q_grps * sizeof(struct virtchnl2_queue_group_id));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_DEL_QUEUE_GROUPS;
+ args.in_args = (uint8_t *)vc_del_q_grps;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_DEL_QUEUE_GROUPS");
+
+ rte_free(vc_del_q_grps);
+ return err;
+}
+
int
idpf_vc_rss_key_set(struct idpf_vport *vport)
{
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index c45295290e..58b16e1c5d 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -64,4 +64,13 @@ int idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
__rte_internal
int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 *buff_count, struct idpf_dma_mem **buffs);
+__rte_internal
+int idpf_vc_queue_grps_del(struct idpf_vport *vport,
+ uint16_t num_q_grps,
+ struct virtchnl2_queue_group_id *qg_ids);
+__rte_internal
+int
+idpf_vc_queue_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *ptp_queue_grps_info,
+ uint8_t *ptp_queue_grps_out);
#endif /* _IDPF_COMMON_VIRTCHNL_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 70334a1b03..01d18f3f3f 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -43,6 +43,8 @@ INTERNAL {
idpf_vc_irq_map_unmap_config;
idpf_vc_one_msg_read;
idpf_vc_ptype_info_query;
+ idpf_vc_queue_grps_add;
+ idpf_vc_queue_grps_del;
idpf_vc_queue_switch;
idpf_vc_queues_ena_dis;
idpf_vc_rss_hash_get;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v3 04/10] net/cpfl: add hairpin queue group during vport init
2023-05-19 7:31 ` [PATCH v3 00/10] net/cpfl: add hairpin queue support beilei.xing
` (2 preceding siblings ...)
2023-05-19 7:31 ` [PATCH v3 03/10] common/idpf: support queue groups add/delete beilei.xing
@ 2023-05-19 7:31 ` beilei.xing
2023-05-24 14:38 ` Wu, Jingjing
2023-05-19 7:31 ` [PATCH v3 05/10] net/cpfl: support hairpin queue setup and release beilei.xing
` (6 subsequent siblings)
10 siblings, 1 reply; 164+ messages in thread
From: beilei.xing @ 2023-05-19 7:31 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds a hairpin queue group during vport init.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
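Not part of the diff: the chunk info captured here is what later patches
use to turn a logical hairpin queue index into an absolute HW queue id
and tail register offset. A minimal sketch of that mapping (the real
helpers, cpfl_hw_qid_get()/cpfl_hw_qtail_get(), are added in the next
patch of this series):

/* Sketch: logical P2P queue index -> HW queue id / tail offset. */
static inline uint32_t
p2p_hw_qid(uint32_t start_qid, uint16_t logic_qid)
{
	return start_qid + logic_qid;
}

static inline uint64_t
p2p_qtail(uint64_t tail_start, uint16_t logic_qid, uint32_t tail_spacing)
{
	return tail_start + (uint64_t)logic_qid * tail_spacing;
}

/* e.g. for the third hairpin Rx queue (logic_qid == 2):
 *   qid  = p2p_hw_qid(p2p_q_chunks_info.rx_start_qid, 2);
 *   tail = p2p_qtail(p2p_q_chunks_info.rx_qtail_start, 2,
 *                    p2p_q_chunks_info.rx_qtail_spacing);
 */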
drivers/net/cpfl/cpfl_ethdev.c | 130 +++++++++++++++++++++++++++++++++
drivers/net/cpfl/cpfl_ethdev.h | 18 +++++
drivers/net/cpfl/cpfl_rxtx.h | 4 +
3 files changed, 152 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index b6fd0b05d0..8e471d2a9b 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -852,6 +852,20 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
return 0;
}
+static int
+cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
+{
+ struct virtchnl2_queue_group_id qg_ids[CPFL_P2P_NB_QUEUE_GRPS] = {0};
+ int ret = 0;
+
+ qg_ids[0].queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
+ qg_ids[0].queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
+ ret = idpf_vc_queue_grps_del(vport, CPFL_P2P_NB_QUEUE_GRPS, qg_ids);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to delete p2p queue groups");
+ return ret;
+}
+
static int
cpfl_dev_close(struct rte_eth_dev *dev)
{
@@ -860,6 +874,10 @@ cpfl_dev_close(struct rte_eth_dev *dev)
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
+
+ if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
+ cpfl_p2p_queue_grps_del(vport);
+
idpf_vport_deinit(vport);
adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
@@ -1297,6 +1315,96 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter)
return vport_idx;
}
+static int
+cpfl_p2p_q_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
+ uint8_t *p2p_q_vc_out_info)
+{
+ int ret;
+
+ p2p_queue_grps_info->vport_id = vport->vport_id;
+ p2p_queue_grps_info->qg_info.num_queue_groups = CPFL_P2P_NB_QUEUE_GRPS;
+ p2p_queue_grps_info->qg_info.groups[0].num_rx_q = CPFL_MAX_P2P_NB_QUEUES;
+ p2p_queue_grps_info->qg_info.groups[0].num_rx_bufq = CPFL_P2P_NB_RX_BUFQ;
+ p2p_queue_grps_info->qg_info.groups[0].num_tx_q = CPFL_MAX_P2P_NB_QUEUES;
+ p2p_queue_grps_info->qg_info.groups[0].num_tx_complq = CPFL_P2P_NB_TX_COMPLQ;
+ p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
+ p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
+ p2p_queue_grps_info->qg_info.groups[0].rx_q_grp_info.rss_lut_size = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.tx_tc = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.priority = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.is_sp = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.pir_weight = 0;
+
+ ret = idpf_vc_queue_grps_add(vport, p2p_queue_grps_info, p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to add p2p queue groups.");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
+ struct virtchnl2_add_queue_groups *p2p_q_vc_out_info)
+{
+ struct p2p_queue_chunks_info *p2p_q_chunks_info = &cpfl_vport->p2p_q_chunks_info;
+ struct virtchnl2_queue_reg_chunks *vc_chunks_out;
+ int i, type;
+
+ if (p2p_q_vc_out_info->qg_info.groups[0].qg_id.queue_group_type !=
+ VIRTCHNL2_QUEUE_GROUP_P2P) {
+ PMD_DRV_LOG(ERR, "Add queue group response mismatch.");
+ return -EINVAL;
+ }
+
+ vc_chunks_out = &p2p_q_vc_out_info->qg_info.groups[0].chunks;
+
+ for (i = 0; i < vc_chunks_out->num_chunks; i++) {
+ type = vc_chunks_out->chunks[i].type;
+ switch (type) {
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ p2p_q_chunks_info->tx_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->tx_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->tx_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ p2p_q_chunks_info->rx_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->rx_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->rx_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ p2p_q_chunks_info->tx_compl_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->tx_compl_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->tx_compl_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ p2p_q_chunks_info->rx_buf_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->rx_buf_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->rx_buf_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported queue type");
+ break;
+ }
+ }
+
+ return 0;
+}
+
static int
cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
{
@@ -1306,6 +1414,8 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
struct cpfl_adapter_ext *adapter = param->adapter;
/* for sending create vport virtchnl msg prepare */
struct virtchnl2_create_vport create_vport_info;
+ struct virtchnl2_add_queue_groups p2p_queue_grps_info;
+ uint8_t p2p_q_vc_out_info[IDPF_DFLT_MBX_BUF_SIZE] = {0};
int ret = 0;
dev->dev_ops = &cpfl_eth_dev_ops;
@@ -1340,8 +1450,28 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
&dev->data->mac_addrs[0]);
+ if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
+ memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
+ ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to add p2p queue group.");
+ goto err_q_grps_add;
+ }
+ ret = cpfl_p2p_queue_info_init(cpfl_vport,
+ (struct virtchnl2_add_queue_groups *)p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to init p2p queue info.");
+ goto err_p2p_qinfo_init;
+ }
+ }
+
return 0;
+err_p2p_qinfo_init:
+ cpfl_p2p_queue_grps_del(vport);
+err_q_grps_add:
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
err_mac_addrs:
adapter->vports[param->idx] = NULL; /* reset */
idpf_vport_deinit(vport);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 81fe9ac4c3..65c9a195b2 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -56,6 +56,7 @@
/* Device IDs */
#define IDPF_DEV_ID_CPF 0x1453
+#define VIRTCHNL2_QUEUE_GROUP_P2P 0x100
struct cpfl_vport_param {
struct cpfl_adapter_ext *adapter;
@@ -69,8 +70,25 @@ struct cpfl_devargs {
uint16_t req_vport_nb;
};
+struct p2p_queue_chunks_info {
+ uint32_t tx_start_qid;
+ uint32_t rx_start_qid;
+ uint32_t tx_compl_start_qid;
+ uint32_t rx_buf_start_qid;
+
+ uint64_t tx_qtail_start;
+ uint32_t tx_qtail_spacing;
+ uint64_t rx_qtail_start;
+ uint32_t rx_qtail_spacing;
+ uint64_t tx_compl_qtail_start;
+ uint32_t tx_compl_qtail_spacing;
+ uint64_t rx_buf_qtail_start;
+ uint32_t rx_buf_qtail_spacing;
+};
+
struct cpfl_vport {
struct idpf_vport base;
+ struct p2p_queue_chunks_info p2p_q_chunks_info;
};
struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index b2b3537d10..3a87a1f4b3 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -17,6 +17,10 @@
#define CPFL_MAX_HAIRPINQ_TX_2_RX 1
#define CPFL_MAX_HAIRPINQ_NB_DESC 1024
#define CPFL_MAX_P2P_NB_QUEUES 16
+#define CPFL_P2P_NB_RX_BUFQ 1
+#define CPFL_P2P_NB_TX_COMPLQ 1
+#define CPFL_P2P_NB_QUEUE_GRPS 1
+#define CPFL_P2P_QUEUE_GRP_ID 1
/* Base address of the HW descriptor ring should be 128B aligned. */
#define CPFL_RING_BASE_ALIGN 128
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v3 05/10] net/cpfl: support hairpin queue setup and release
2023-05-19 7:31 ` [PATCH v3 00/10] net/cpfl: add hairpin queue support beilei.xing
` (3 preceding siblings ...)
2023-05-19 7:31 ` [PATCH v3 04/10] net/cpfl: add hairpin queue group during vport init beilei.xing
@ 2023-05-19 7:31 ` beilei.xing
2023-05-24 9:01 ` Liu, Mingxia
2023-05-25 3:58 ` Wu, Jingjing
2023-05-19 7:31 ` [PATCH v3 06/10] net/cpfl: support hairpin queue configuration beilei.xing
` (5 subsequent siblings)
10 siblings, 2 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-19 7:31 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
Support hairpin Rx/Tx queue setup and release.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
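Not part of the diff: a minimal application-side sketch of pairing one
hairpin Rx queue with one hairpin Tx queue on a peer port through the
standard ethdev API, which lands in the setup ops added below. Queue
indices, descriptor count and the manual-bind choice are illustrative;
hairpin queues are indexed after the regular data queues.

#include <string.h>
#include <rte_ethdev.h>

/* Sketch: set up one manually bound hairpin Rx/Tx queue pair. */
static int
setup_hairpin_pair(uint16_t rx_port, uint16_t rx_qid,
		   uint16_t tx_port, uint16_t tx_qid, uint16_t nb_desc)
{
	struct rte_eth_hairpin_conf conf;
	int ret;

	memset(&conf, 0, sizeof(conf));
	conf.peer_count = 1;
	conf.manual_bind = 1;	/* bind later, e.g. with rte_eth_hairpin_bind() */

	conf.peers[0].port = tx_port;
	conf.peers[0].queue = tx_qid;
	ret = rte_eth_rx_hairpin_queue_setup(rx_port, rx_qid, nb_desc, &conf);
	if (ret != 0)
		return ret;

	conf.peers[0].port = rx_port;
	conf.peers[0].queue = rx_qid;
	return rte_eth_tx_hairpin_queue_setup(tx_port, tx_qid, nb_desc, &conf);
}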
drivers/net/cpfl/cpfl_ethdev.c | 6 +
drivers/net/cpfl/cpfl_ethdev.h | 12 +
drivers/net/cpfl/cpfl_rxtx.c | 373 +++++++++++++++++++++++-
drivers/net/cpfl/cpfl_rxtx.h | 26 ++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 4 +
5 files changed, 420 insertions(+), 1 deletion(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 8e471d2a9b..03813716ce 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -874,6 +874,10 @@ cpfl_dev_close(struct rte_eth_dev *dev)
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
+ if (cpfl_vport->p2p_mp) {
+ rte_mempool_free(cpfl_vport->p2p_mp);
+ cpfl_vport->p2p_mp = NULL;
+ }
if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
cpfl_p2p_queue_grps_del(vport);
@@ -916,6 +920,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.xstats_get_names = cpfl_dev_xstats_get_names,
.xstats_reset = cpfl_dev_xstats_reset,
.hairpin_cap_get = cpfl_hairpin_cap_get,
+ .rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
+ .tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
};
static int
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 65c9a195b2..a48344299c 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -89,6 +89,18 @@ struct p2p_queue_chunks_info {
struct cpfl_vport {
struct idpf_vport base;
struct p2p_queue_chunks_info p2p_q_chunks_info;
+
+ struct rte_mempool *p2p_mp;
+
+ uint16_t nb_data_rxq;
+ uint16_t nb_data_txq;
+ uint16_t nb_p2p_rxq;
+ uint16_t nb_p2p_txq;
+
+ struct idpf_rx_queue *p2p_rx_bufq;
+ struct idpf_tx_queue *p2p_tx_complq;
+ bool p2p_manual_bind;
+
};
struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 04a51b8d15..333a399e73 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -10,6 +10,79 @@
#include "cpfl_rxtx.h"
#include "cpfl_rxtx_vec_common.h"
+uint16_t
+cpfl_hw_qid_get(uint16_t start_qid, uint16_t offset)
+{
+ return start_qid + offset;
+}
+
+uint64_t
+cpfl_hw_qtail_get(uint64_t tail_start, uint16_t offset, uint64_t tail_spacing)
+{
+ return tail_start + offset * tail_spacing;
+}
+
+static inline void
+cpfl_tx_hairpin_descq_reset(struct idpf_tx_queue *txq)
+{
+ uint32_t i, size;
+
+ if (!txq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+ return;
+ }
+
+ size = txq->nb_tx_desc * CPFL_P2P_DESC_LEN;
+ for (i = 0; i < size; i++)
+ ((volatile char *)txq->desc_ring)[i] = 0;
+}
+
+static inline void
+cpfl_tx_hairpin_complq_reset(struct idpf_tx_queue *cq)
+{
+ uint32_t i, size;
+
+ if (!cq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to complq is NULL");
+ return;
+ }
+
+ size = cq->nb_tx_desc * CPFL_P2P_DESC_LEN;
+ for (i = 0; i < size; i++)
+ ((volatile char *)cq->compl_ring)[i] = 0;
+}
+
+static inline void
+cpfl_rx_hairpin_descq_reset(struct idpf_rx_queue *rxq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxq)
+ return;
+
+ len = rxq->nb_rx_desc;
+ for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
+ ((volatile char *)rxq->rx_ring)[i] = 0;
+}
+
+static inline void
+cpfl_rx_hairpin_bufq_reset(struct idpf_rx_queue *rxbq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxbq)
+ return;
+
+ len = rxbq->nb_rx_desc;
+ for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
+ ((volatile char *)rxbq->rx_ring)[i] = 0;
+
+ rxbq->bufq1 = NULL;
+ rxbq->bufq2 = NULL;
+}
+
static uint64_t
cpfl_rx_offload_convert(uint64_t offload)
{
@@ -234,7 +307,10 @@ cpfl_rx_queue_release(void *rxq)
/* Split queue */
if (!q->adapter->is_rx_singleq) {
- if (q->bufq2)
+ /* the mz is shared between Tx/Rx hairpin, let Rx_release
+ * free the buf, q->bufq1->mz and q->mz.
+ */
+ if (!cpfl_rxq->hairpin_info.hairpin_q && q->bufq2)
cpfl_rx_split_bufq_release(q->bufq2);
if (q->bufq1)
@@ -385,6 +461,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
}
+ cpfl_vport->nb_data_rxq++;
rxq->q_set = true;
dev->data->rx_queues[queue_idx] = cpfl_rxq;
@@ -548,6 +625,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
queue_idx * vport->chunks_info.tx_qtail_spacing);
txq->ops = &def_txq_ops;
+ cpfl_vport->nb_data_txq++;
txq->q_set = true;
dev->data->tx_queues[queue_idx] = cpfl_txq;
@@ -562,6 +640,297 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return ret;
}
+static int
+cpfl_rx_hairpin_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,
+ uint16_t logic_qid, uint16_t nb_desc)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct rte_mempool *mp;
+ char pool_name[RTE_MEMPOOL_NAMESIZE];
+
+ mp = cpfl_vport->p2p_mp;
+ if (!mp) {
+ snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "p2p_mb_pool_%u",
+ dev->data->port_id);
+ mp = rte_pktmbuf_pool_create(pool_name, CPFL_P2P_NB_MBUF, CPFL_P2P_CACHE_SIZE,
+ 0, CPFL_P2P_MBUF_SIZE, dev->device->numa_node);
+ if (!mp) {
+ PMD_INIT_LOG(ERR, "Failed to allocate mbuf pool for p2p");
+ return -ENOMEM;
+ }
+ cpfl_vport->p2p_mp = mp;
+ }
+
+ bufq->mp = mp;
+ bufq->nb_rx_desc = nb_desc;
+ bufq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.rx_buf_start_qid, logic_qid);
+ bufq->port_id = dev->data->port_id;
+ bufq->adapter = adapter;
+ bufq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
+
+ bufq->sw_ring = rte_zmalloc("sw ring",
+ sizeof(struct rte_mbuf *) * nb_desc,
+ RTE_CACHE_LINE_SIZE);
+ if (!bufq->sw_ring) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
+ return -ENOMEM;
+ }
+
+ bufq->q_set = true;
+ bufq->ops = &def_rxq_ops;
+
+ return 0;
+}
+
+int
+cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf)
+{
+ struct cpfl_vport *cpfl_vport = (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter_base = vport->adapter;
+ uint16_t logic_qid = cpfl_vport->nb_p2p_rxq;
+ struct cpfl_rxq_hairpin_info *hairpin_info;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct idpf_rx_queue *bufq1 = NULL;
+ struct idpf_rx_queue *rxq;
+ uint16_t peer_port, peer_q;
+ uint16_t qid;
+ int ret;
+
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin queue.");
+ return -EINVAL;
+ }
+
+ if (conf->peer_count != 1) {
+ PMD_INIT_LOG(ERR, "Can't support Rx hairpin queue peer count %d", conf->peer_count);
+ return -EINVAL;
+ }
+
+ peer_port = conf->peers[0].port;
+ peer_q = conf->peers[0].queue;
+
+ if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
+ nb_desc > CPFL_MAX_RING_DESC ||
+ nb_desc < CPFL_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is invalid", nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory if needed */
+ if (dev->data->rx_queues[queue_idx]) {
+ cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* Setup Rx descriptor queue */
+ cpfl_rxq = rte_zmalloc_socket("cpfl hairpin rxq",
+ sizeof(struct cpfl_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!cpfl_rxq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
+ return -ENOMEM;
+ }
+
+ rxq = &cpfl_rxq->base;
+ hairpin_info = &cpfl_rxq->hairpin_info;
+ rxq->nb_rx_desc = nb_desc * 2;
+ rxq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.rx_start_qid, logic_qid);
+ rxq->port_id = dev->data->port_id;
+ rxq->adapter = adapter_base;
+ rxq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
+ hairpin_info->hairpin_q = true;
+ hairpin_info->peer_txp = peer_port;
+ hairpin_info->peer_txq_id = peer_q;
+
+ if (conf->manual_bind != 0)
+ cpfl_vport->p2p_manual_bind = true;
+ else
+ cpfl_vport->p2p_manual_bind = false;
+
+ /* setup 1 Rx buffer queue for the 1st hairpin rxq */
+ if (logic_qid == 0) {
+ bufq1 = rte_zmalloc_socket("hairpin rx bufq1",
+ sizeof(struct idpf_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!bufq1) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for hairpin Rx buffer queue 1.");
+ ret = -ENOMEM;
+ goto err_alloc_bufq1;
+ }
+ qid = 2 * logic_qid;
+ ret = cpfl_rx_hairpin_bufq_setup(dev, bufq1, qid, nb_desc);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to setup hairpin Rx buffer queue 1");
+ ret = -EINVAL;
+ goto err_setup_bufq1;
+ }
+ cpfl_vport->p2p_rx_bufq = bufq1;
+ }
+
+ rxq->bufq1 = cpfl_vport->p2p_rx_bufq;
+ rxq->bufq2 = NULL;
+
+ cpfl_vport->nb_p2p_rxq++;
+ rxq->q_set = true;
+ dev->data->rx_queues[queue_idx] = cpfl_rxq;
+
+ return 0;
+
+err_setup_bufq1:
+ rte_free(bufq1);
+err_alloc_bufq1:
+ rte_free(rxq);
+
+ return ret;
+}
+
+int
+cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter_base = vport->adapter;
+ uint16_t logic_qid = cpfl_vport->nb_p2p_txq;
+ struct cpfl_txq_hairpin_info *hairpin_info;
+ struct idpf_hw *hw = &adapter_base->hw;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct idpf_tx_queue *txq, *cq;
+ const struct rte_memzone *mz;
+ uint32_t ring_size;
+ uint16_t peer_port, peer_q;
+
+ if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin queue.");
+ return -EINVAL;
+ }
+
+ if (conf->peer_count != 1) {
+ PMD_INIT_LOG(ERR, "Can't support Tx hairpin queue peer count %d", conf->peer_count);
+ return -EINVAL;
+ }
+
+ peer_port = conf->peers[0].port;
+ peer_q = conf->peers[0].queue;
+
+ if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
+ nb_desc > CPFL_MAX_RING_DESC ||
+ nb_desc < CPFL_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is invalid",
+ nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory if needed. */
+ if (dev->data->tx_queues[queue_idx]) {
+ cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the TX queue data structure. */
+ cpfl_txq = rte_zmalloc_socket("cpfl hairpin txq",
+ sizeof(struct cpfl_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!cpfl_txq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+ return -ENOMEM;
+ }
+
+ txq = &cpfl_txq->base;
+ hairpin_info = &cpfl_txq->hairpin_info;
+ /* Txq ring length should be 2 times the Tx completion queue size. */
+ txq->nb_tx_desc = nb_desc * 2;
+ txq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.tx_start_qid, logic_qid);
+ txq->port_id = dev->data->port_id;
+ hairpin_info->hairpin_q = true;
+ hairpin_info->peer_rxp = peer_port;
+ hairpin_info->peer_rxq_id = peer_q;
+
+ if (conf->manual_bind != 0)
+ cpfl_vport->p2p_manual_bind = true;
+ else
+ cpfl_vport->p2p_manual_bind = false;
+
+ /* Tx hairpin queue always allocates the Tx HW ring */
+ ring_size = RTE_ALIGN(txq->nb_tx_desc * CPFL_P2P_DESC_LEN,
+ CPFL_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_ring", logic_qid,
+ ring_size + CPFL_P2P_RING_BUF,
+ CPFL_RING_BASE_ALIGN,
+ dev->device->numa_node);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+ return -ENOMEM;
+ }
+
+ txq->tx_ring_phys_addr = mz->iova;
+ txq->desc_ring = mz->addr;
+ txq->mz = mz;
+
+ cpfl_tx_hairpin_descq_reset(txq);
+ txq->qtx_tail = hw->hw_addr +
+ cpfl_hw_qtail_get(cpfl_vport->p2p_q_chunks_info.tx_qtail_start,
+ logic_qid, cpfl_vport->p2p_q_chunks_info.tx_qtail_spacing);
+ txq->ops = &def_txq_ops;
+
+ if (cpfl_vport->p2p_tx_complq == NULL) {
+ cq = rte_zmalloc_socket("cpfl hairpin cq",
+ sizeof(struct idpf_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ dev->device->numa_node);
+ if (!cq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+ return -ENOMEM;
+ }
+
+ cq->nb_tx_desc = nb_desc;
+ cq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.tx_compl_start_qid, 0);
+ cq->port_id = dev->data->port_id;
+
+ /* Tx completion queue always allocates the HW ring */
+ ring_size = RTE_ALIGN(cq->nb_tx_desc * CPFL_P2P_DESC_LEN,
+ CPFL_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_compl_ring", logic_qid,
+ ring_size + CPFL_P2P_RING_BUF,
+ CPFL_RING_BASE_ALIGN,
+ dev->device->numa_node);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX completion queue");
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+ return -ENOMEM;
+ }
+ cq->tx_ring_phys_addr = mz->iova;
+ cq->compl_ring = mz->addr;
+ cq->mz = mz;
+
+ cpfl_tx_hairpin_complq_reset(cq);
+ cpfl_vport->p2p_tx_complq = cq;
+ }
+
+ txq->complq = cpfl_vport->p2p_tx_complq;
+
+ cpfl_vport->nb_p2p_txq++;
+ txq->q_set = true;
+ dev->data->tx_queues[queue_idx] = cpfl_txq;
+
+ return 0;
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
@@ -865,6 +1234,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ continue;
(void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 3a87a1f4b3..5e9f2dada7 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -13,6 +13,7 @@
#define CPFL_MIN_RING_DESC 32
#define CPFL_MAX_RING_DESC 4096
#define CPFL_DMA_MEM_ALIGN 4096
+#define CPFL_P2P_DESC_LEN 16
#define CPFL_MAX_HAIRPINQ_RX_2_TX 1
#define CPFL_MAX_HAIRPINQ_TX_2_RX 1
#define CPFL_MAX_HAIRPINQ_NB_DESC 1024
@@ -21,6 +22,10 @@
#define CPFL_P2P_NB_TX_COMPLQ 1
#define CPFL_P2P_NB_QUEUE_GRPS 1
#define CPFL_P2P_QUEUE_GRP_ID 1
+#define CPFL_P2P_NB_MBUF 4096
+#define CPFL_P2P_CACHE_SIZE 250
+#define CPFL_P2P_MBUF_SIZE 2048
+#define CPFL_P2P_RING_BUF 128
/* Base address of the HW descriptor ring should be 128B aligned. */
#define CPFL_RING_BASE_ALIGN 128
@@ -31,12 +36,26 @@
#define CPFL_SUPPORT_CHAIN_NUM 5
+struct cpfl_rxq_hairpin_info {
+ bool hairpin_q; /* if rx queue is a hairpin queue */
+ uint16_t peer_txp;
+ uint16_t peer_txq_id;
+};
+
struct cpfl_rx_queue {
struct idpf_rx_queue base;
+ struct cpfl_rxq_hairpin_info hairpin_info;
+};
+
+struct cpfl_txq_hairpin_info {
+ bool hairpin_q; /* if tx queue is a hairpin queue */
+ uint16_t peer_rxp;
+ uint16_t peer_rxq_id;
};
struct cpfl_tx_queue {
struct idpf_tx_queue base;
+ struct cpfl_txq_hairpin_info hairpin_info;
};
int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
@@ -57,4 +76,11 @@ void cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void cpfl_set_rx_function(struct rte_eth_dev *dev);
void cpfl_set_tx_function(struct rte_eth_dev *dev);
+uint16_t cpfl_hw_qid_get(uint16_t start_qid, uint16_t offset);
+uint64_t cpfl_hw_qtail_get(uint64_t tail_start, uint16_t offset, uint64_t tail_spacing);
+int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, const struct rte_eth_hairpin_conf *conf);
+int cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf);
#endif /* _CPFL_RXTX_H_ */
diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
index 5690b17911..d8e9191196 100644
--- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
+++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
@@ -85,6 +85,8 @@ cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)
cpfl_rxq = dev->data->rx_queues[i];
default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ continue;
splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base);
ret = splitq_ret && default_ret;
} else {
@@ -106,6 +108,8 @@ cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_tx_queues; i++) {
cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq->hairpin_info.hairpin_q)
+ continue;
ret = cpfl_tx_vec_queue_default(&cpfl_txq->base);
if (ret == CPFL_SCALAR_PATH)
return CPFL_SCALAR_PATH;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v3 06/10] net/cpfl: support hairpin queue configuration
2023-05-19 7:31 ` [PATCH v3 00/10] net/cpfl: add hairpin queue support beilei.xing
` (4 preceding siblings ...)
2023-05-19 7:31 ` [PATCH v3 05/10] net/cpfl: support hairpin queue setup and release beilei.xing
@ 2023-05-19 7:31 ` beilei.xing
2023-05-19 7:31 ` [PATCH v3 07/10] net/cpfl: support hairpin queue start/stop beilei.xing
` (4 subsequent siblings)
10 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-19 7:31 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports Rx/Tx hairpin queue configuration.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
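Not part of the diff: the peer-queue update below translates an
ethdev-level queue index (data queues first, hairpin queues after them)
into the peer vport's absolute HW queue id before the queues are
configured over virtchnl. A short sketch of that arithmetic with
illustrative numbers:

/* Sketch: ethdev-level hairpin Rx queue index -> peer HW queue id.
 * Illustrative assumption: the peer Rx vport has 4 data Rx queues and
 * its P2P Rx queue chunk starts at HW queue id 128.
 */
static inline uint32_t
peer_hw_rxq_id(uint16_t ethdev_rxq_idx, uint16_t nb_data_rxq,
	       uint32_t rx_start_qid)
{
	return rx_start_qid + (ethdev_rxq_idx - nb_data_rxq);
}

/* peer_hw_rxq_id(5, 4, 128) == 129, i.e. the second hairpin Rx queue. */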
drivers/common/idpf/idpf_common_virtchnl.c | 70 +++++++++++
drivers/common/idpf/idpf_common_virtchnl.h | 6 +
drivers/common/idpf/version.map | 2 +
drivers/net/cpfl/cpfl_ethdev.c | 136 ++++++++++++++++++++-
drivers/net/cpfl/cpfl_rxtx.c | 80 ++++++++++++
drivers/net/cpfl/cpfl_rxtx.h | 7 ++
6 files changed, 295 insertions(+), 6 deletions(-)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index a3fe55c897..211b44a88e 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -1050,6 +1050,41 @@ idpf_vc_rxq_config(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
return err;
}
+int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct virtchnl2_rxq_info *rxq_info,
+ uint16_t num_qs)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+ struct idpf_cmd_info args;
+ int size, err, i;
+
+ size = sizeof(*vc_rxqs) + (num_qs - 1) *
+ sizeof(struct virtchnl2_rxq_info);
+ vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+ if (vc_rxqs == NULL) {
+ DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_rxqs->vport_id = vport->vport_id;
+ vc_rxqs->num_qinfo = num_qs;
+ memcpy(vc_rxqs->qinfo, rxq_info, num_qs * sizeof(struct virtchnl2_rxq_info));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+ args.in_args = (uint8_t *)vc_rxqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ rte_free(vc_rxqs);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+ return err;
+}
+
int
idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
{
@@ -1121,6 +1156,41 @@ idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
return err;
}
+int
+idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
+ uint16_t num_qs)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+ struct idpf_cmd_info args;
+ int size, err;
+
+ size = sizeof(*vc_txqs) + (num_qs - 1) * sizeof(struct virtchnl2_txq_info);
+ vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+ if (vc_txqs == NULL) {
+ DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_txqs->vport_id = vport->vport_id;
+ vc_txqs->num_qinfo = num_qs;
+ memcpy(vc_txqs->qinfo, txq_info, num_qs * sizeof(struct virtchnl2_txq_info));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+ args.in_args = (uint8_t *)vc_txqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ rte_free(vc_txqs);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+ return err;
+}
+
int
idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
struct idpf_ctlq_msg *q_msg)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index 58b16e1c5d..db83761a5e 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -65,6 +65,12 @@ __rte_internal
int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 *buff_count, struct idpf_dma_mem **buffs);
__rte_internal
+int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct virtchnl2_rxq_info *rxq_info,
+ uint16_t num_qs);
+__rte_internal
+int idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
+ uint16_t num_qs);
+__rte_internal
int idpf_vc_queue_grps_del(struct idpf_vport *vport,
uint16_t num_q_grps,
struct virtchnl2_queue_group_id *qg_ids);
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 01d18f3f3f..17e77884ce 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -54,8 +54,10 @@ INTERNAL {
idpf_vc_rss_lut_get;
idpf_vc_rss_lut_set;
idpf_vc_rxq_config;
+ idpf_vc_rxq_config_by_info;
idpf_vc_stats_query;
idpf_vc_txq_config;
+ idpf_vc_txq_config_by_info;
idpf_vc_vectors_alloc;
idpf_vc_vectors_dealloc;
idpf_vc_vport_create;
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 03813716ce..2921e52757 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -737,33 +737,157 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
return idpf_vport_irq_map_config(vport, nb_rx_queues);
}
+/* Update hairpin_info for dev's tx hairpin queue */
+static int
+cpfl_txq_hairpin_info_update(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+ struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+ struct cpfl_txq_hairpin_info *hairpin_info;
+ struct cpfl_tx_queue *cpfl_txq;
+ int i;
+
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ hairpin_info = &cpfl_txq->hairpin_info;
+ if (hairpin_info->peer_rxp != rx_port) {
+ PMD_DRV_LOG(ERR, "port %d is not the peer port", rx_port);
+ return -EINVAL;
+ }
+ hairpin_info->peer_rxq_id =
+ cpfl_hw_qid_get(cpfl_rx_vport->p2p_q_chunks_info.rx_start_qid,
+ hairpin_info->peer_rxq_id - cpfl_rx_vport->nb_data_rxq);
+ }
+
+ return 0;
+}
+
+/* Bind Rx hairpin queue's memory zone to peer Tx hairpin queue's memory zone */
+static void
+cpfl_rxq_hairpin_mz_bind(struct rte_eth_dev *dev)
+{
+ struct cpfl_vport *cpfl_rx_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_rx_vport->base;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_hw *hw = &adapter->hw;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct rte_eth_dev *peer_dev;
+ const struct rte_memzone *mz;
+ uint16_t peer_tx_port;
+ uint16_t peer_tx_qid;
+ int i;
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ peer_tx_port = cpfl_rxq->hairpin_info.peer_txp;
+ peer_tx_qid = cpfl_rxq->hairpin_info.peer_txq_id;
+ peer_dev = &rte_eth_devices[peer_tx_port];
+ cpfl_txq = peer_dev->data->tx_queues[peer_tx_qid];
+
+ /* bind rx queue */
+ mz = cpfl_txq->base.mz;
+ cpfl_rxq->base.rx_ring_phys_addr = mz->iova;
+ cpfl_rxq->base.rx_ring = mz->addr;
+ cpfl_rxq->base.mz = mz;
+
+ /* bind rx buffer queue */
+ mz = cpfl_txq->base.complq->mz;
+ cpfl_rxq->base.bufq1->rx_ring_phys_addr = mz->iova;
+ cpfl_rxq->base.bufq1->rx_ring = mz->addr;
+ cpfl_rxq->base.bufq1->mz = mz;
+ cpfl_rxq->base.bufq1->qrx_tail = hw->hw_addr +
+ cpfl_hw_qtail_get(cpfl_rx_vport->p2p_q_chunks_info.rx_buf_qtail_start,
+ 0, cpfl_rx_vport->p2p_q_chunks_info.rx_buf_qtail_spacing);
+ }
+}
+
static int
cpfl_start_queues(struct rte_eth_dev *dev)
{
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_rx_queue *cpfl_rxq;
struct cpfl_tx_queue *cpfl_txq;
+ int update_flag = 0;
int err = 0;
int i;
+ /* For normal data queues, configure, init and enable Txq.
+ * For non-manual bind hairpin queues, configure Txq.
+ */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
cpfl_txq = dev->data->tx_queues[i];
if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
continue;
- err = cpfl_tx_queue_start(dev, i);
+ if (!cpfl_txq->hairpin_info.hairpin_q) {
+ err = cpfl_tx_queue_start(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+ return err;
+ }
+ } else if (!cpfl_vport->p2p_manual_bind) {
+ if (update_flag == 0) {
+ err = cpfl_txq_hairpin_info_update(dev,
+ cpfl_txq->hairpin_info.peer_rxp);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info");
+ return err;
+ }
+ update_flag = 1;
+ }
+ err = cpfl_hairpin_txq_config(vport, cpfl_txq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+ return err;
+ }
+ }
+ }
+
+ /* For non-manual bind hairpin queues, configure Tx completion queue first.*/
+ if (!cpfl_vport->p2p_manual_bind && cpfl_vport->p2p_tx_complq != NULL) {
+ err = cpfl_hairpin_tx_complq_config(cpfl_vport);
if (err != 0) {
- PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+ PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
return err;
}
}
+ /* For non-manual bind hairpin queues, configure Rx buffer queue.*/
+ if (!cpfl_vport->p2p_manual_bind && cpfl_vport->p2p_rx_bufq != NULL) {
+ cpfl_rxq_hairpin_mz_bind(dev);
+ err = cpfl_hairpin_rx_bufq_config(cpfl_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
+ return err;
+ }
+ }
+
+ /* For normal data queues, configure, init and enable Rxq.
+ * For non-manual bind hairpin queues, configure Rxq, and then init Rxq.
+ */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
continue;
- err = cpfl_rx_queue_start(dev, i);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
- return err;
+ if (!cpfl_rxq->hairpin_info.hairpin_q) {
+ err = cpfl_rx_queue_start(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
+ return err;
+ }
+ } else if (!cpfl_vport->p2p_manual_bind) {
+ err = cpfl_hairpin_rxq_config(vport, cpfl_rxq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
+ return err;
+ }
+ err = cpfl_rx_queue_init(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+ return err;
+ }
}
}
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 333a399e73..91d1ab1ffc 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -931,6 +931,86 @@ cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return 0;
}
+int
+cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport)
+{
+ struct idpf_rx_queue *rx_bufq = cpfl_vport->p2p_rx_bufq;
+ struct virtchnl2_rxq_info rxq_info[1] = {0};
+
+ rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ rxq_info[0].queue_id = rx_bufq->queue_id;
+ rxq_info[0].ring_len = rx_bufq->nb_rx_desc;
+ rxq_info[0].dma_ring_addr = rx_bufq->rx_ring_phys_addr;
+ rxq_info[0].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info[0].rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
+ rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ rxq_info[0].data_buffer_size = rx_bufq->rx_buf_len;
+ rxq_info[0].buffer_notif_stride = CPFL_RX_BUF_STRIDE;
+
+ return idpf_vc_rxq_config_by_info(&cpfl_vport->base, rxq_info, 1);
+}
+
+int
+cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq)
+{
+ struct virtchnl2_rxq_info rxq_info[1] = {0};
+ struct idpf_rx_queue *rxq = &cpfl_rxq->base;
+
+ rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX;
+ rxq_info[0].queue_id = rxq->queue_id;
+ rxq_info[0].ring_len = rxq->nb_rx_desc;
+ rxq_info[0].dma_ring_addr = rxq->rx_ring_phys_addr;
+ rxq_info[0].rx_bufq1_id = rxq->bufq1->queue_id;
+ rxq_info[0].max_pkt_size = vport->max_pkt_len;
+ rxq_info[0].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info[0].qflags |= VIRTCHNL2_RX_DESC_SIZE_16BYTE;
+
+ rxq_info[0].data_buffer_size = rxq->rx_buf_len;
+ rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ rxq_info[0].rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
+
+ PMD_DRV_LOG(NOTICE, "hairpin: vport %u, Rxq id 0x%x",
+ vport->vport_id, rxq_info[0].queue_id);
+
+ return idpf_vc_rxq_config_by_info(vport, rxq_info, 1);
+}
+
+int
+cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport)
+{
+ struct idpf_tx_queue *tx_complq = cpfl_vport->p2p_tx_complq;
+ struct virtchnl2_txq_info txq_info[1] = {0};
+
+ txq_info[0].dma_ring_addr = tx_complq->tx_ring_phys_addr;
+ txq_info[0].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ txq_info[0].queue_id = tx_complq->queue_id;
+ txq_info[0].ring_len = tx_complq->nb_tx_desc;
+ txq_info[0].peer_rx_queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+ txq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info[0].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+
+ return idpf_vc_txq_config_by_info(&cpfl_vport->base, txq_info, 1);
+}
+
+int
+cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq)
+{
+ struct idpf_tx_queue *txq = &cpfl_txq->base;
+ struct virtchnl2_txq_info txq_info[1] = {0};
+
+ txq_info[0].dma_ring_addr = txq->tx_ring_phys_addr;
+ txq_info[0].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ txq_info[0].queue_id = txq->queue_id;
+ txq_info[0].ring_len = txq->nb_tx_desc;
+ txq_info[0].tx_compl_queue_id = txq->complq->queue_id;
+ txq_info[0].relative_queue_id = txq->queue_id;
+ txq_info[0].peer_rx_queue_id = cpfl_txq->hairpin_info.peer_rxq_id;
+ txq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info[0].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+
+ return idpf_vc_txq_config_by_info(vport, txq_info, 1);
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 5e9f2dada7..5e2ddd1ef9 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -30,12 +30,15 @@
#define CPFL_RING_BASE_ALIGN 128
#define CPFL_DEFAULT_RX_FREE_THRESH 32
+#define CPFL_RXBUF_LOW_WATERMARK 64
#define CPFL_DEFAULT_TX_RS_THRESH 32
#define CPFL_DEFAULT_TX_FREE_THRESH 32
#define CPFL_SUPPORT_CHAIN_NUM 5
+#define CPFL_RX_BUF_STRIDE 64
+
struct cpfl_rxq_hairpin_info {
bool hairpin_q; /* if rx queue is a hairpin queue */
uint16_t peer_txp;
@@ -83,4 +86,8 @@ int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
int cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc,
const struct rte_eth_hairpin_conf *conf);
+int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport);
+int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq);
+int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
+int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
#endif /* _CPFL_RXTX_H_ */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v3 07/10] net/cpfl: support hairpin queue start/stop
2023-05-19 7:31 ` [PATCH v3 00/10] net/cpfl: add hairpin queue support beilei.xing
` (5 preceding siblings ...)
2023-05-19 7:31 ` [PATCH v3 06/10] net/cpfl: support hairpin queue configuration beilei.xing
@ 2023-05-19 7:31 ` beilei.xing
2023-05-25 4:12 ` Wu, Jingjing
2023-05-19 7:31 ` [PATCH v3 08/10] net/cpfl: enable write back based on ITR expire beilei.xing
` (3 subsequent siblings)
10 siblings, 1 reply; 164+ messages in thread
From: beilei.xing @ 2023-05-19 7:31 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports Rx/Tx hairpin queue start/stop.
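As a reading aid, the enable ordering implemented below for the non-manual bind case can be condensed to the following sketch; it only restates the cpfl_start_queues() changes in this patch, with error handling and the manual-bind branch omitted.

	/* Enable hairpin data queues first, then the shared Tx completion
	 * queue and Rx buffer queue. Logical queue ids are relative to the
	 * first hairpin queue of the vport.
	 */
	for (i = cpfl_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++)
		cpfl_switch_hairpin_rxtx_queue(cpfl_vport, i - cpfl_vport->nb_data_txq,
					       false /* Tx */, true /* on */);

	for (i = cpfl_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++)
		cpfl_switch_hairpin_rxtx_queue(cpfl_vport, i - cpfl_vport->nb_data_rxq,
					       true /* Rx */, true /* on */);

	cpfl_switch_hairpin_bufq_complq(cpfl_vport, true);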
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 2 +-
drivers/common/idpf/idpf_common_virtchnl.h | 3 +
drivers/common/idpf/version.map | 1 +
drivers/net/cpfl/cpfl_ethdev.c | 41 ++++++
drivers/net/cpfl/cpfl_rxtx.c | 153 ++++++++++++++++++---
drivers/net/cpfl/cpfl_rxtx.h | 14 ++
6 files changed, 195 insertions(+), 19 deletions(-)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index 211b44a88e..6455f640da 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -733,7 +733,7 @@ idpf_vc_vectors_dealloc(struct idpf_vport *vport)
return err;
}
-static int
+int
idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
uint32_t type, bool on)
{
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index db83761a5e..9ff5c38c26 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -71,6 +71,9 @@ __rte_internal
int idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
uint16_t num_qs);
__rte_internal
+int idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
+ uint32_t type, bool on);
+__rte_internal
int idpf_vc_queue_grps_del(struct idpf_vport *vport,
uint16_t num_q_grps,
struct virtchnl2_queue_group_id *qg_ids);
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 17e77884ce..25624732b0 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -40,6 +40,7 @@ INTERNAL {
idpf_vc_cmd_execute;
idpf_vc_ctlq_post_rx_buffs;
idpf_vc_ctlq_recv;
+ idpf_vc_ena_dis_one_queue;
idpf_vc_irq_map_unmap_config;
idpf_vc_one_msg_read;
idpf_vc_ptype_info_query;
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 2921e52757..c2ab0690fc 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -891,6 +891,47 @@ cpfl_start_queues(struct rte_eth_dev *dev)
}
}
+ /* For non-manual bind hairpin queues, enable Tx queue and Rx queue,
+ * then enable Tx completion queue and Rx buffer queue.
+ */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) {
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ i - cpfl_vport->nb_data_txq,
+ false, true);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on",
+ i);
+ else
+ cpfl_txq->base.q_started = true;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) {
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ i - cpfl_vport->nb_data_rxq,
+ true, true);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on",
+ i);
+ else
+ cpfl_rxq->base.q_started = true;
+ }
+ }
+
+ if (!cpfl_vport->p2p_manual_bind &&
+ cpfl_vport->p2p_tx_complq != NULL &&
+ cpfl_vport->p2p_rx_bufq != NULL) {
+ err = cpfl_switch_hairpin_bufq_complq(cpfl_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq and Rx bufq");
+ return err;
+ }
+ }
+
return err;
}
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 91d1ab1ffc..4b509a96f3 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -1011,6 +1011,83 @@ cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq
return idpf_vc_txq_config_by_info(vport, txq_info, 1);
}
+int
+cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ queue_id = cpfl_vport->p2p_tx_complq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+ if (err)
+ return err;
+
+ type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+ return err;
+}
+
+int
+cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t logic_qid,
+ bool rx, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = rx ? VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;
+
+ if (type == VIRTCHNL2_QUEUE_TYPE_RX)
+ queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.rx_start_qid, logic_qid);
+ else
+ queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.tx_start_qid, logic_qid);
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+ if (err)
+ return err;
+
+ return err;
+}
+
+static int
+cpfl_alloc_split_p2p_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+ volatile struct virtchnl2_p2p_rx_buf_desc *rxd;
+ struct rte_mbuf *mbuf = NULL;
+ uint64_t dma_addr;
+ uint16_t i;
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ mbuf = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!mbuf)) {
+ PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+ return -ENOMEM;
+ }
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+ rxd = &((volatile struct virtchnl2_p2p_rx_buf_desc *)(rxq->rx_ring))[i];
+ rxd->reserve0 = 0;
+ rxd->pkt_addr = dma_addr;
+
+ rxq->sw_ring[i] = mbuf;
+ }
+
+ rxq->nb_rx_hold = 0;
+ /* The value written in the RX buffer queue tail register must be a multiple of 8. */
+ rxq->rx_tail = rxq->nb_rx_desc - CPFL_HAIRPIN_Q_TAIL_AUX_VALUE;
+
+ return 0;
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
@@ -1064,22 +1141,31 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
IDPF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
} else {
/* Split queue */
- err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
- return err;
- }
- err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
- return err;
+ if (cpfl_rxq->hairpin_info.hairpin_q) {
+ err = cpfl_alloc_split_p2p_rxq_mbufs(rxq->bufq1);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate p2p RX buffer queue mbuf");
+ return err;
+ }
+ } else {
+ err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+ return err;
+ }
+ err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+ return err;
+ }
}
rte_wmb();
/* Init the RX tail register. */
IDPF_PCI_REG_WRITE(rxq->bufq1->qrx_tail, rxq->bufq1->rx_tail);
- IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
+ if (rxq->bufq2)
+ IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
}
return err;
@@ -1186,7 +1272,12 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return -EINVAL;
cpfl_rxq = dev->data->rx_queues[rx_queue_id];
- err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ rx_queue_id - cpfl_vport->nb_data_txq,
+ true, false);
+ else
+ err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
rx_queue_id);
@@ -1200,10 +1291,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
idpf_qc_single_rx_queue_reset(rxq);
} else {
rxq->bufq1->ops->release_mbufs(rxq->bufq1);
- rxq->bufq2->ops->release_mbufs(rxq->bufq2);
- idpf_qc_split_rx_queue_reset(rxq);
+ if (rxq->bufq2)
+ rxq->bufq2->ops->release_mbufs(rxq->bufq2);
+ if (cpfl_rxq->hairpin_info.hairpin_q) {
+ cpfl_rx_hairpin_descq_reset(rxq);
+ cpfl_rx_hairpin_bufq_reset(rxq->bufq1);
+ } else {
+ idpf_qc_split_rx_queue_reset(rxq);
+ }
}
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ if (!cpfl_rxq->hairpin_info.hairpin_q)
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1222,7 +1320,12 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
cpfl_txq = dev->data->tx_queues[tx_queue_id];
- err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
+ if (cpfl_txq->hairpin_info.hairpin_q)
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ tx_queue_id - cpfl_vport->nb_data_txq,
+ false, false);
+ else
+ err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
tx_queue_id);
@@ -1235,10 +1338,17 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
idpf_qc_single_tx_queue_reset(txq);
} else {
- idpf_qc_split_tx_descq_reset(txq);
- idpf_qc_split_tx_complq_reset(txq->complq);
+ if (cpfl_txq->hairpin_info.hairpin_q) {
+ cpfl_tx_hairpin_descq_reset(txq);
+ cpfl_tx_hairpin_complq_reset(txq->complq);
+ } else {
+ idpf_qc_split_tx_descq_reset(txq);
+ idpf_qc_split_tx_complq_reset(txq->complq);
+ }
}
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ if (!cpfl_txq->hairpin_info.hairpin_q)
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1258,10 +1368,17 @@ cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
void
cpfl_stop_queues(struct rte_eth_dev *dev)
{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
struct cpfl_rx_queue *cpfl_rxq;
struct cpfl_tx_queue *cpfl_txq;
int i;
+ if (cpfl_vport->p2p_rx_bufq != NULL) {
+ if (cpfl_switch_hairpin_bufq_complq(cpfl_vport, false) != 0)
+ PMD_DRV_LOG(ERR, "Failed to stop hairpin Tx complq and Rx bufq");
+ }
+
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
if (cpfl_rxq == NULL)
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 5e2ddd1ef9..b961aad4ce 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -39,6 +39,17 @@
#define CPFL_RX_BUF_STRIDE 64
+/* The value written in the RX buffer queue tail register,
+ * and in WritePTR field in the TX completion queue context,
+ * must be a multiple of 8.
+ */
+#define CPFL_HAIRPIN_Q_TAIL_AUX_VALUE 8
+
+struct virtchnl2_p2p_rx_buf_desc {
+ __le64 reserve0;
+ __le64 pkt_addr; /* Packet buffer address */
+};
+
struct cpfl_rxq_hairpin_info {
bool hairpin_q; /* if rx queue is a hairpin queue */
uint16_t peer_txp;
@@ -90,4 +101,7 @@ int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq);
int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
+int cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t qid,
+ bool rx, bool on);
#endif /* _CPFL_RXTX_H_ */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v3 08/10] net/cpfl: enable write back based on ITR expire
2023-05-19 7:31 ` [PATCH v3 00/10] net/cpfl: add hairpin queue support beilei.xing
` (6 preceding siblings ...)
2023-05-19 7:31 ` [PATCH v3 07/10] net/cpfl: support hairpin queue start/stop beilei.xing
@ 2023-05-19 7:31 ` beilei.xing
2023-05-25 4:17 ` Wu, Jingjing
2023-05-19 7:31 ` [PATCH v3 09/10] net/cpfl: support peer ports get beilei.xing
` (2 subsequent siblings)
10 siblings, 1 reply; 164+ messages in thread
From: beilei.xing @ 2023-05-19 7:31 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch enables write back based on ITR expire
(WB_ON_ITR) for hairpin queues.
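For readers not familiar with WB_ON_ITR: the change below maps every Rx queue (data and hairpin) to the same vector and forces descriptor write-back when the ITR expires instead of on interrupt. The register programming can be condensed to the sketch below, which only restates what idpf_vport_irq_map_config_by_qids() does in this patch.

	/* WB_ON_ITR and INTENA are mutually exclusive: with WB_ON_ITR set,
	 * Tx/Rx descriptors are written back whenever ITR index 0 expires,
	 * while Rx interrupts stay disabled.
	 */
	dynctl_val = VIRTCHNL2_ITR_IDX_0 << PF_GLINT_DYN_CTL_ITR_INDX_S |   /* ITR index 0 */
		     PF_GLINT_DYN_CTL_WB_ON_ITR_M |                         /* write back on ITR expire */
		     itrn_val << PF_GLINT_DYN_CTL_INTERVAL_S;               /* ITR interval */
	IDPF_WRITE_REG(hw, dynctl_reg_start, dynctl_val);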
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_device.c | 75 ++++++++++++++++++++++++
drivers/common/idpf/idpf_common_device.h | 4 ++
drivers/common/idpf/version.map | 1 +
drivers/net/cpfl/cpfl_ethdev.c | 13 +++-
4 files changed, 92 insertions(+), 1 deletion(-)
diff --git a/drivers/common/idpf/idpf_common_device.c b/drivers/common/idpf/idpf_common_device.c
index dc47551b17..cc4207a46e 100644
--- a/drivers/common/idpf/idpf_common_device.c
+++ b/drivers/common/idpf/idpf_common_device.c
@@ -667,6 +667,81 @@ idpf_vport_irq_map_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
return ret;
}
+int
+idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport, uint32_t *qids, uint16_t nb_rx_queues)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_queue_vector *qv_map;
+ struct idpf_hw *hw = &adapter->hw;
+ uint32_t dynctl_val, itrn_val;
+ uint32_t dynctl_reg_start;
+ uint32_t itrn_reg_start;
+ uint16_t i;
+ int ret;
+
+ qv_map = rte_zmalloc("qv_map",
+ nb_rx_queues *
+ sizeof(struct virtchnl2_queue_vector), 0);
+ if (qv_map == NULL) {
+ DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
+ nb_rx_queues);
+ ret = -ENOMEM;
+ goto qv_map_alloc_err;
+ }
+
+ /* Rx interrupt disabled, Map interrupt only for writeback */
+
+ /* The capability flags adapter->caps.other_caps should be
+ * compared with bit VIRTCHNL2_CAP_WB_ON_ITR here. The if
+ * condition should be updated when the FW can return the
+ * correct flag bits.
+ */
+ dynctl_reg_start =
+ vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
+ itrn_reg_start =
+ vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
+ dynctl_val = IDPF_READ_REG(hw, dynctl_reg_start);
+ DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x", dynctl_val);
+ itrn_val = IDPF_READ_REG(hw, itrn_reg_start);
+ DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
+ /* Force write-backs by setting WB_ON_ITR bit in DYN_CTL
+ * register. WB_ON_ITR and INTENA are mutually exclusive
+ * bits. Setting WB_ON_ITR bits means TX and RX Descs
+ * are written back based on ITR expiration irrespective
+ * of INTENA setting.
+ */
+ /* TBD: need to tune INTERVAL value for better performance. */
+ itrn_val = (itrn_val == 0) ? IDPF_DFLT_INTERVAL : itrn_val;
+ dynctl_val = VIRTCHNL2_ITR_IDX_0 <<
+ PF_GLINT_DYN_CTL_ITR_INDX_S |
+ PF_GLINT_DYN_CTL_WB_ON_ITR_M |
+ itrn_val << PF_GLINT_DYN_CTL_INTERVAL_S;
+ IDPF_WRITE_REG(hw, dynctl_reg_start, dynctl_val);
+
+ for (i = 0; i < nb_rx_queues; i++) {
+ /* map all queues to the same vector */
+ qv_map[i].queue_id = qids[i];
+ qv_map[i].vector_id =
+ vport->recv_vectors->vchunks.vchunks->start_vector_id;
+ }
+ vport->qv_map = qv_map;
+
+ ret = idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, true);
+ if (ret != 0) {
+ DRV_LOG(ERR, "config interrupt mapping failed");
+ goto config_irq_map_err;
+ }
+
+ return 0;
+
+config_irq_map_err:
+ rte_free(vport->qv_map);
+ vport->qv_map = NULL;
+
+qv_map_alloc_err:
+ return ret;
+}
+
int
idpf_vport_irq_unmap_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
{
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 112367dae8..f767ea7cec 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -200,5 +200,9 @@ int idpf_vport_info_init(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_info);
__rte_internal
void idpf_vport_stats_update(struct virtchnl2_vport_stats *oes, struct virtchnl2_vport_stats *nes);
+__rte_internal
+int idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport,
+ uint32_t *qids,
+ uint16_t nb_rx_queues);
#endif /* _IDPF_COMMON_DEVICE_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 25624732b0..0729f6b912 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -69,6 +69,7 @@ INTERNAL {
idpf_vport_info_init;
idpf_vport_init;
idpf_vport_irq_map_config;
+ idpf_vport_irq_map_config_by_qids;
idpf_vport_irq_unmap_config;
idpf_vport_rss_config;
idpf_vport_stats_update;
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index c2ab0690fc..3b480178c0 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -730,11 +730,22 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
static int
cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
{
+ uint32_t qids[CPFL_MAX_P2P_NB_QUEUES + IDPF_DEFAULT_RXQ_NUM] = {0};
struct cpfl_vport *cpfl_vport = dev->data->dev_private;
struct idpf_vport *vport = &cpfl_vport->base;
uint16_t nb_rx_queues = dev->data->nb_rx_queues;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i;
- return idpf_vport_irq_map_config(vport, nb_rx_queues);
+ for (i = 0; i < nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ qids[i] = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.rx_start_qid,
+ (i - cpfl_vport->nb_data_rxq));
+ else
+ qids[i] = cpfl_hw_qid_get(vport->chunks_info.rx_start_qid, i);
+ }
+ return idpf_vport_irq_map_config_by_qids(vport, qids, nb_rx_queues);
}
/* Update hairpin_info for dev's tx hairpin queue */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v3 09/10] net/cpfl: support peer ports get
2023-05-19 7:31 ` [PATCH v3 00/10] net/cpfl: add hairpin queue support beilei.xing
` (7 preceding siblings ...)
2023-05-19 7:31 ` [PATCH v3 08/10] net/cpfl: enable write back based on ITR expire beilei.xing
@ 2023-05-19 7:31 ` beilei.xing
2023-05-25 5:26 ` Wu, Jingjing
2023-05-19 7:31 ` [PATCH v3 10/10] net/cpfl: support hairpin bind/unbind beilei.xing
2023-05-26 7:38 ` [PATCH v4 00/13] net/cpfl: add hairpin queue support beilei.xing
10 siblings, 1 reply; 164+ messages in thread
From: beilei.xing @ 2023-05-19 7:31 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports getting hairpin peer ports.
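A minimal usage sketch of the generic ethdev API that lands in this new op; the port id and array size are illustrative.

	uint16_t peer_ports[RTE_MAX_ETHPORTS];
	uint16_t tx_port = 0;                     /* placeholder port id */
	int nb_peers;

	/* direction > 0: tx_port is taken as the Tx side, so the peer Rx port
	 * ids are returned; the call reports how many peer ports were written.
	 */
	nb_peers = rte_eth_hairpin_get_peer_ports(tx_port, peer_ports,
						  RTE_DIM(peer_ports), 1);
	if (nb_peers > 0)
		printf("port %u has %d hairpin peer Rx port(s), first is %u\n",
		       tx_port, nb_peers, peer_ports[0]);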
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 34 ++++++++++++++++++++++++++++++++++
1 file changed, 34 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 3b480178c0..59c7e75d2a 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1069,6 +1069,39 @@ cpfl_dev_close(struct rte_eth_dev *dev)
return 0;
}
+static int
+cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
+ __rte_unused size_t len, uint32_t tx)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_tx_queue *txq;
+ struct idpf_rx_queue *rxq;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i, j;
+
+ if (tx > 0) {
+ for (i = cpfl_vport->nb_data_txq, j = 0; i < dev->data->nb_tx_queues; i++, j++) {
+ txq = dev->data->tx_queues[i];
+ if (txq == NULL)
+ return -EINVAL;
+ cpfl_txq = (struct cpfl_tx_queue *)txq;
+ peer_ports[j] = cpfl_txq->hairpin_info.peer_rxp;
+ }
+ } else if (tx == 0) {
+ for (i = cpfl_vport->nb_data_rxq, j = 0; i < dev->data->nb_rx_queues; i++, j++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq == NULL)
+ return -EINVAL;
+ cpfl_rxq = (struct cpfl_rx_queue *)rxq;
+ peer_ports[j] = cpfl_rxq->hairpin_info.peer_txp;
+ }
+ }
+
+ return j;
+}
+
static const struct eth_dev_ops cpfl_eth_dev_ops = {
.dev_configure = cpfl_dev_configure,
.dev_close = cpfl_dev_close,
@@ -1098,6 +1131,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.hairpin_cap_get = cpfl_hairpin_cap_get,
.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
+ .hairpin_get_peer_ports = cpfl_hairpin_get_peer_ports,
};
static int
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v3 10/10] net/cpfl: support hairpin bind/unbind
2023-05-19 7:31 ` [PATCH v3 00/10] net/cpfl: add hairpin queue support beilei.xing
` (8 preceding siblings ...)
2023-05-19 7:31 ` [PATCH v3 09/10] net/cpfl: support peer ports get beilei.xing
@ 2023-05-19 7:31 ` beilei.xing
2023-05-26 7:38 ` [PATCH v4 00/13] net/cpfl: add hairpin queue support beilei.xing
10 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-19 7:31 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports hairpin_bind/unbind ops.
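For reference, the manual-bind flow these ops implement looks like this from the application side; the port ids are placeholders, and the hairpin queues are assumed to have been set up with manual_bind = 1 and both ports started.

	uint16_t tx_port = 0, rx_port = 1;        /* placeholder port ids */
	int ret;

	/* Bind the Tx side of tx_port to the Rx side of rx_port; both calls
	 * resolve to the hairpin_bind/hairpin_unbind ops added in this patch.
	 */
	ret = rte_eth_hairpin_bind(tx_port, rx_port);
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "hairpin bind failed: %d\n", ret);

	/* ... traffic flows through the hairpin path ... */

	ret = rte_eth_hairpin_unbind(tx_port, rx_port);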
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 137 +++++++++++++++++++++++++++++++++
drivers/net/cpfl/cpfl_rxtx.c | 28 +++++++
drivers/net/cpfl/cpfl_rxtx.h | 2 +
3 files changed, 167 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 59c7e75d2a..7e8f059dfb 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1102,6 +1102,141 @@ cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
return j;
}
+static int
+cpfl_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct idpf_vport *tx_vport = &cpfl_tx_vport->base;
+ struct cpfl_vport *cpfl_rx_vport;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct rte_eth_dev *peer_dev;
+ struct idpf_vport *rx_vport;
+ int err = 0;
+ int i;
+
+ err = cpfl_txq_hairpin_info_update(dev, rx_port);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info.");
+ return err;
+ }
+
+ /* configure hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ err = cpfl_hairpin_txq_config(tx_vport, cpfl_txq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+ return err;
+ }
+ }
+
+ err = cpfl_hairpin_tx_complq_config(cpfl_tx_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
+ return err;
+ }
+
+ peer_dev = &rte_eth_devices[rx_port];
+ cpfl_rx_vport = (struct cpfl_vport *)peer_dev->data->dev_private;
+ rx_vport = &cpfl_rx_vport->base;
+ cpfl_rxq_hairpin_mz_bind(peer_dev);
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ err = cpfl_hairpin_rxq_config(rx_vport, cpfl_rxq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
+ return err;
+ }
+ err = cpfl_rx_queue_init(peer_dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+ return err;
+ }
+ }
+
+ err = cpfl_hairpin_rx_bufq_config(cpfl_rx_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
+ return err;
+ }
+
+ /* enable hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+ i - cpfl_tx_vport->nb_data_txq,
+ false, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on",
+ i);
+ return err;
+ }
+ cpfl_txq->base.q_started = true;
+ }
+
+ err = cpfl_switch_hairpin_complq(cpfl_tx_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq");
+ return err;
+ }
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+ i - cpfl_rx_vport->nb_data_rxq,
+ true, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on",
+ i);
+ }
+ cpfl_rxq->base.q_started = true;
+ }
+
+ err = cpfl_switch_hairpin_bufq(cpfl_rx_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Rx buffer queue");
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+cpfl_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+ struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i;
+
+ /* disable hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+ i - cpfl_tx_vport->nb_data_txq,
+ false, false);
+ cpfl_txq->base.q_started = false;
+ }
+
+ cpfl_switch_hairpin_complq(cpfl_tx_vport, false);
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+ i - cpfl_rx_vport->nb_data_rxq,
+ true, false);
+ cpfl_rxq->base.q_started = false;
+ }
+
+ cpfl_switch_hairpin_bufq(cpfl_rx_vport, false);
+
+ return 0;
+}
+
static const struct eth_dev_ops cpfl_eth_dev_ops = {
.dev_configure = cpfl_dev_configure,
.dev_close = cpfl_dev_close,
@@ -1132,6 +1267,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
.hairpin_get_peer_ports = cpfl_hairpin_get_peer_ports,
+ .hairpin_bind = cpfl_hairpin_bind,
+ .hairpin_unbind = cpfl_hairpin_unbind,
};
static int
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 4b509a96f3..6c252043fb 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -1031,6 +1031,34 @@ cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on)
return err;
}
+int
+cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ queue_id = cpfl_vport->p2p_tx_complq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+ return err;
+}
+
+int
+cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+ return err;
+}
+
int
cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t logic_qid,
bool rx, bool on)
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index b961aad4ce..279c271520 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -102,6 +102,8 @@ int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl
int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
int cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on);
int cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t qid,
bool rx, bool on);
#endif /* _CPFL_RXTX_H_ */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH v3 05/10] net/cpfl: support hairpin queue setup and release
2023-05-19 7:31 ` [PATCH v3 05/10] net/cpfl: support hairpin queue setup and release beilei.xing
@ 2023-05-24 9:01 ` Liu, Mingxia
2023-05-26 3:46 ` Xing, Beilei
2023-05-25 3:58 ` Wu, Jingjing
1 sibling, 1 reply; 164+ messages in thread
From: Liu, Mingxia @ 2023-05-24 9:01 UTC (permalink / raw)
To: Xing, Beilei, Wu, Jingjing; +Cc: dev, Wang, Xiao W
> +cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> + uint16_t nb_desc,
> + const struct rte_eth_hairpin_conf *conf) {
> + struct cpfl_vport *cpfl_vport =
> + (struct cpfl_vport *)dev->data->dev_private;
> +
> + struct idpf_vport *vport = &cpfl_vport->base;
> + struct idpf_adapter *adapter_base = vport->adapter;
> + uint16_t logic_qid = cpfl_vport->nb_p2p_txq;
> + struct cpfl_txq_hairpin_info *hairpin_info;
> + struct idpf_hw *hw = &adapter_base->hw;
> + struct cpfl_tx_queue *cpfl_txq;
> + struct idpf_tx_queue *txq, *cq;
> + const struct rte_memzone *mz;
> + uint32_t ring_size;
> + uint16_t peer_port, peer_q;
> +
> + if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
> + PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin
> queue.");
> + return -EINVAL;
> + }
> +
> + if (conf->peer_count != 1) {
> + PMD_INIT_LOG(ERR, "Can't support Tx hairpin queue peer
> count %d", conf->peer_count);
> + return -EINVAL;
> + }
> +
> + peer_port = conf->peers[0].port;
> + peer_q = conf->peers[0].queue;
> +
> + if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
> + nb_desc > CPFL_MAX_RING_DESC ||
> + nb_desc < CPFL_MIN_RING_DESC) {
> + PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is
> invalid",
> + nb_desc);
> + return -EINVAL;
> + }
> +
> + /* Free memory if needed. */
> + if (dev->data->tx_queues[queue_idx]) {
> + cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
> + dev->data->tx_queues[queue_idx] = NULL;
> + }
> +
> + /* Allocate the TX queue data structure. */
> + cpfl_txq = rte_zmalloc_socket("cpfl hairpin txq",
> + sizeof(struct cpfl_tx_queue),
> + RTE_CACHE_LINE_SIZE,
> + SOCKET_ID_ANY);
> + if (!cpfl_txq) {
> + PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue
> structure");
> + return -ENOMEM;
> + }
> +
> + txq = &cpfl_txq->base;
> + hairpin_info = &cpfl_txq->hairpin_info;
> + /* Txq ring length should be 2 times of Tx completion queue size. */
> + txq->nb_tx_desc = nb_desc * 2;
> + txq->queue_id = cpfl_hw_qid_get(cpfl_vport-
> >p2p_q_chunks_info.tx_start_qid, logic_qid);
> + txq->port_id = dev->data->port_id;
> + hairpin_info->hairpin_q = true;
> + hairpin_info->peer_rxp = peer_port;
> + hairpin_info->peer_rxq_id = peer_q;
> +
> + if (conf->manual_bind != 0)
> + cpfl_vport->p2p_manual_bind = true;
> + else
> + cpfl_vport->p2p_manual_bind = false;
> +
> + /* Always Tx hairpin queue allocates Tx HW ring */
> + ring_size = RTE_ALIGN(txq->nb_tx_desc * CPFL_P2P_DESC_LEN,
> + CPFL_DMA_MEM_ALIGN);
> + mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_ring", logic_qid,
> + ring_size + CPFL_P2P_RING_BUF,
> + CPFL_RING_BASE_ALIGN,
> + dev->device->numa_node);
> + if (!mz) {
> + PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
> + rte_free(txq->sw_ring);
> + rte_free(txq);
> + return -ENOMEM;
> + }
> +
> + txq->tx_ring_phys_addr = mz->iova;
> + txq->desc_ring = mz->addr;
> + txq->mz = mz;
> +
> + cpfl_tx_hairpin_descq_reset(txq);
> + txq->qtx_tail = hw->hw_addr +
> + cpfl_hw_qtail_get(cpfl_vport-
> >p2p_q_chunks_info.tx_qtail_start,
> + logic_qid, cpfl_vport-
> >p2p_q_chunks_info.tx_qtail_spacing);
> + txq->ops = &def_txq_ops;
> +
> + if (cpfl_vport->p2p_tx_complq == NULL) {
[Liu, Mingxia] In cpfl_rx_hairpin_queue_setup(), "logic_qid" is used to identify whether it is the first time to allocate the "p2p_rx_bufq" buffer.
Can the two checks be unified, using either logic_qid == 0 or p2p_tx_complq/p2p_rx_bufq == NULL in both places?
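For example, the Tx side could mirror the Rx-side check (illustrative sketch only, not a tested change):

	if (logic_qid == 0) {
		/* First hairpin Tx queue: allocate the shared p2p_tx_complq here,
		 * mirroring the logic_qid == 0 check used for p2p_rx_bufq on the
		 * Rx side.
		 */
	}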
> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Friday, May 19, 2023 3:31 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>
> Subject: [PATCH v3 05/10] net/cpfl: support hairpin queue setup and release
>
> From: Beilei Xing <beilei.xing@intel.com>
>
> Support hairpin Rx/Tx queue setup and release.
>
> Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/net/cpfl/cpfl_ethdev.c | 6 +
> drivers/net/cpfl/cpfl_ethdev.h | 12 +
> drivers/net/cpfl/cpfl_rxtx.c | 373 +++++++++++++++++++++++-
> drivers/net/cpfl/cpfl_rxtx.h | 26 ++
> drivers/net/cpfl/cpfl_rxtx_vec_common.h | 4 +
> 5 files changed, 420 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c index
> 8e471d2a9b..03813716ce 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.c
> +++ b/drivers/net/cpfl/cpfl_ethdev.c
> @@ -874,6 +874,10 @@ cpfl_dev_close(struct rte_eth_dev *dev)
> struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport-
> >adapter);
>
> cpfl_dev_stop(dev);
> + if (cpfl_vport->p2p_mp) {
> + rte_mempool_free(cpfl_vport->p2p_mp);
> + cpfl_vport->p2p_mp = NULL;
> + }
>
> if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
> cpfl_p2p_queue_grps_del(vport);
> @@ -916,6 +920,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
> .xstats_get_names = cpfl_dev_xstats_get_names,
> .xstats_reset = cpfl_dev_xstats_reset,
> .hairpin_cap_get = cpfl_hairpin_cap_get,
> + .rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
> + .tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
> };
>
> static int
> diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h index
> 65c9a195b2..a48344299c 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.h
> +++ b/drivers/net/cpfl/cpfl_ethdev.h
> @@ -89,6 +89,18 @@ struct p2p_queue_chunks_info { struct cpfl_vport {
> struct idpf_vport base;
> struct p2p_queue_chunks_info p2p_q_chunks_info;
> +
> + struct rte_mempool *p2p_mp;
> +
> + uint16_t nb_data_rxq;
> + uint16_t nb_data_txq;
> + uint16_t nb_p2p_rxq;
> + uint16_t nb_p2p_txq;
> +
> + struct idpf_rx_queue *p2p_rx_bufq;
> + struct idpf_tx_queue *p2p_tx_complq;
> + bool p2p_manual_bind;
> +
> };
>
> struct cpfl_adapter_ext {
> diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c index
> 04a51b8d15..333a399e73 100644
> --- a/drivers/net/cpfl/cpfl_rxtx.c
> +++ b/drivers/net/cpfl/cpfl_rxtx.c
> @@ -10,6 +10,79 @@
> #include "cpfl_rxtx.h"
> #include "cpfl_rxtx_vec_common.h"
>
> +uint16_t
> +cpfl_hw_qid_get(uint16_t start_qid, uint16_t offset) {
> + return start_qid + offset;
> +}
> +
> +uint64_t
> +cpfl_hw_qtail_get(uint64_t tail_start, uint16_t offset, uint64_t
> +tail_spacing) {
> + return tail_start + offset * tail_spacing; }
> +
> +static inline void
> +cpfl_tx_hairpin_descq_reset(struct idpf_tx_queue *txq) {
> + uint32_t i, size;
> +
> + if (!txq) {
> + PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
> + return;
> + }
> +
> + size = txq->nb_tx_desc * CPFL_P2P_DESC_LEN;
> + for (i = 0; i < size; i++)
> + ((volatile char *)txq->desc_ring)[i] = 0; }
> +
> +static inline void
> +cpfl_tx_hairpin_complq_reset(struct idpf_tx_queue *cq) {
> + uint32_t i, size;
> +
> + if (!cq) {
> + PMD_DRV_LOG(DEBUG, "Pointer to complq is NULL");
> + return;
> + }
> +
> + size = cq->nb_tx_desc * CPFL_P2P_DESC_LEN;
> + for (i = 0; i < size; i++)
> + ((volatile char *)cq->compl_ring)[i] = 0; }
> +
> +static inline void
> +cpfl_rx_hairpin_descq_reset(struct idpf_rx_queue *rxq) {
> + uint16_t len;
> + uint32_t i;
> +
> + if (!rxq)
> + return;
> +
> + len = rxq->nb_rx_desc;
> + for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
> + ((volatile char *)rxq->rx_ring)[i] = 0; }
> +
> +static inline void
> +cpfl_rx_hairpin_bufq_reset(struct idpf_rx_queue *rxbq) {
> + uint16_t len;
> + uint32_t i;
> +
> + if (!rxbq)
> + return;
> +
> + len = rxbq->nb_rx_desc;
> + for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
> + ((volatile char *)rxbq->rx_ring)[i] = 0;
> +
> + rxbq->bufq1 = NULL;
> + rxbq->bufq2 = NULL;
> +}
> +
> static uint64_t
> cpfl_rx_offload_convert(uint64_t offload) { @@ -234,7 +307,10 @@
> cpfl_rx_queue_release(void *rxq)
>
> /* Split queue */
> if (!q->adapter->is_rx_singleq) {
> - if (q->bufq2)
> + /* the mz is shared between Tx/Rx hairpin, let Rx_release
> + * free the buf, q->bufq1->mz and q->mz.
> + */
> + if (!cpfl_rxq->hairpin_info.hairpin_q && q->bufq2)
> cpfl_rx_split_bufq_release(q->bufq2);
>
> if (q->bufq1)
> @@ -385,6 +461,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t
> queue_idx,
> }
> }
>
> + cpfl_vport->nb_data_rxq++;
> rxq->q_set = true;
> dev->data->rx_queues[queue_idx] = cpfl_rxq;
>
> @@ -548,6 +625,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t
> queue_idx,
> txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
> queue_idx * vport->chunks_info.tx_qtail_spacing);
> txq->ops = &def_txq_ops;
> + cpfl_vport->nb_data_txq++;
> txq->q_set = true;
> dev->data->tx_queues[queue_idx] = cpfl_txq;
>
> @@ -562,6 +640,297 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev,
> uint16_t queue_idx,
> return ret;
> }
>
> +static int
> +cpfl_rx_hairpin_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue
> *bufq,
> + uint16_t logic_qid, uint16_t nb_desc) {
> + struct cpfl_vport *cpfl_vport =
> + (struct cpfl_vport *)dev->data->dev_private;
> + struct idpf_vport *vport = &cpfl_vport->base;
> + struct idpf_adapter *adapter = vport->adapter;
> + struct rte_mempool *mp;
> + char pool_name[RTE_MEMPOOL_NAMESIZE];
> +
> + mp = cpfl_vport->p2p_mp;
> + if (!mp) {
> + snprintf(pool_name, RTE_MEMPOOL_NAMESIZE,
> "p2p_mb_pool_%u",
> + dev->data->port_id);
> + mp = rte_pktmbuf_pool_create(pool_name,
> CPFL_P2P_NB_MBUF, CPFL_P2P_CACHE_SIZE,
> + 0, CPFL_P2P_MBUF_SIZE, dev-
> >device->numa_node);
> + if (!mp) {
> + PMD_INIT_LOG(ERR, "Failed to allocate mbuf pool for
> p2p");
> + return -ENOMEM;
> + }
> + cpfl_vport->p2p_mp = mp;
> + }
> +
> + bufq->mp = mp;
> + bufq->nb_rx_desc = nb_desc;
> + bufq->queue_id = cpfl_hw_qid_get(cpfl_vport-
> >p2p_q_chunks_info.rx_buf_start_qid, logic_qid);
> + bufq->port_id = dev->data->port_id;
> + bufq->adapter = adapter;
> + bufq->rx_buf_len = CPFL_P2P_MBUF_SIZE -
> RTE_PKTMBUF_HEADROOM;
> +
> + bufq->sw_ring = rte_zmalloc("sw ring",
> + sizeof(struct rte_mbuf *) * nb_desc,
> + RTE_CACHE_LINE_SIZE);
> + if (!bufq->sw_ring) {
> + PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
> + return -ENOMEM;
> + }
> +
> + bufq->q_set = true;
> + bufq->ops = &def_rxq_ops;
> +
> + return 0;
> +}
> +
> +int
> +cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> + uint16_t nb_desc,
> + const struct rte_eth_hairpin_conf *conf) {
> + struct cpfl_vport *cpfl_vport = (struct cpfl_vport *)dev->data-
> >dev_private;
> + struct idpf_vport *vport = &cpfl_vport->base;
> + struct idpf_adapter *adapter_base = vport->adapter;
> + uint16_t logic_qid = cpfl_vport->nb_p2p_rxq;
> + struct cpfl_rxq_hairpin_info *hairpin_info;
> + struct cpfl_rx_queue *cpfl_rxq;
> + struct idpf_rx_queue *bufq1 = NULL;
> + struct idpf_rx_queue *rxq;
> + uint16_t peer_port, peer_q;
> + uint16_t qid;
> + int ret;
> +
> + if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
> + PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin
> queue.");
> + return -EINVAL;
> + }
> +
> + if (conf->peer_count != 1) {
> + PMD_INIT_LOG(ERR, "Can't support Rx hairpin queue peer
> count %d", conf->peer_count);
> + return -EINVAL;
> + }
> +
> + peer_port = conf->peers[0].port;
> + peer_q = conf->peers[0].queue;
> +
> + if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
> + nb_desc > CPFL_MAX_RING_DESC ||
> + nb_desc < CPFL_MIN_RING_DESC) {
> + PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is
> invalid", nb_desc);
> + return -EINVAL;
> + }
> +
> + /* Free memory if needed */
> + if (dev->data->rx_queues[queue_idx]) {
> + cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
> + dev->data->rx_queues[queue_idx] = NULL;
> + }
> +
> + /* Setup Rx description queue */
> + cpfl_rxq = rte_zmalloc_socket("cpfl hairpin rxq",
> + sizeof(struct cpfl_rx_queue),
> + RTE_CACHE_LINE_SIZE,
> + SOCKET_ID_ANY);
> + if (!cpfl_rxq) {
> + PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue
> data structure");
> + return -ENOMEM;
> + }
> +
> + rxq = &cpfl_rxq->base;
> + hairpin_info = &cpfl_rxq->hairpin_info;
> + rxq->nb_rx_desc = nb_desc * 2;
> + rxq->queue_id = cpfl_hw_qid_get(cpfl_vport-
> >p2p_q_chunks_info.rx_start_qid, logic_qid);
> + rxq->port_id = dev->data->port_id;
> + rxq->adapter = adapter_base;
> + rxq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
> + hairpin_info->hairpin_q = true;
> + hairpin_info->peer_txp = peer_port;
> + hairpin_info->peer_txq_id = peer_q;
> +
> + if (conf->manual_bind != 0)
> + cpfl_vport->p2p_manual_bind = true;
> + else
> + cpfl_vport->p2p_manual_bind = false;
> +
> + /* setup 1 Rx buffer queue for the 1st hairpin rxq */
> + if (logic_qid == 0) {
> + bufq1 = rte_zmalloc_socket("hairpin rx bufq1",
> + sizeof(struct idpf_rx_queue),
> + RTE_CACHE_LINE_SIZE,
> + SOCKET_ID_ANY);
> + if (!bufq1) {
> + PMD_INIT_LOG(ERR, "Failed to allocate memory for
> hairpin Rx buffer queue 1.");
> + ret = -ENOMEM;
> + goto err_alloc_bufq1;
> + }
> + qid = 2 * logic_qid;
> + ret = cpfl_rx_hairpin_bufq_setup(dev, bufq1, qid, nb_desc);
> + if (ret) {
> + PMD_INIT_LOG(ERR, "Failed to setup hairpin Rx buffer
> queue 1");
> + ret = -EINVAL;
> + goto err_setup_bufq1;
> + }
> + cpfl_vport->p2p_rx_bufq = bufq1;
> + }
> +
> + rxq->bufq1 = cpfl_vport->p2p_rx_bufq;
> + rxq->bufq2 = NULL;
> +
> + cpfl_vport->nb_p2p_rxq++;
> + rxq->q_set = true;
> + dev->data->rx_queues[queue_idx] = cpfl_rxq;
> +
> + return 0;
> +
> +err_setup_bufq1:
> + rte_free(bufq1);
> +err_alloc_bufq1:
> + rte_free(rxq);
> +
> + return ret;
> +}
> +
> +int
> +cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> + uint16_t nb_desc,
> + const struct rte_eth_hairpin_conf *conf) {
> + struct cpfl_vport *cpfl_vport =
> + (struct cpfl_vport *)dev->data->dev_private;
> +
> + struct idpf_vport *vport = &cpfl_vport->base;
> + struct idpf_adapter *adapter_base = vport->adapter;
> + uint16_t logic_qid = cpfl_vport->nb_p2p_txq;
> + struct cpfl_txq_hairpin_info *hairpin_info;
> + struct idpf_hw *hw = &adapter_base->hw;
> + struct cpfl_tx_queue *cpfl_txq;
> + struct idpf_tx_queue *txq, *cq;
> + const struct rte_memzone *mz;
> + uint32_t ring_size;
> + uint16_t peer_port, peer_q;
> +
> + if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
> + PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin
> queue.");
> + return -EINVAL;
> + }
> +
> + if (conf->peer_count != 1) {
> + PMD_INIT_LOG(ERR, "Can't support Tx hairpin queue peer
> count %d", conf->peer_count);
> + return -EINVAL;
> + }
> +
> + peer_port = conf->peers[0].port;
> + peer_q = conf->peers[0].queue;
> +
> + if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
> + nb_desc > CPFL_MAX_RING_DESC ||
> + nb_desc < CPFL_MIN_RING_DESC) {
> + PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is
> invalid",
> + nb_desc);
> + return -EINVAL;
> + }
> +
> + /* Free memory if needed. */
> + if (dev->data->tx_queues[queue_idx]) {
> + cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
> + dev->data->tx_queues[queue_idx] = NULL;
> + }
> +
> + /* Allocate the TX queue data structure. */
> + cpfl_txq = rte_zmalloc_socket("cpfl hairpin txq",
> + sizeof(struct cpfl_tx_queue),
> + RTE_CACHE_LINE_SIZE,
> + SOCKET_ID_ANY);
> + if (!cpfl_txq) {
> + PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue
> structure");
> + return -ENOMEM;
> + }
> +
> + txq = &cpfl_txq->base;
> + hairpin_info = &cpfl_txq->hairpin_info;
> + /* Txq ring length should be 2 times of Tx completion queue size. */
> + txq->nb_tx_desc = nb_desc * 2;
> + txq->queue_id = cpfl_hw_qid_get(cpfl_vport-
> >p2p_q_chunks_info.tx_start_qid, logic_qid);
> + txq->port_id = dev->data->port_id;
> + hairpin_info->hairpin_q = true;
> + hairpin_info->peer_rxp = peer_port;
> + hairpin_info->peer_rxq_id = peer_q;
> +
> + if (conf->manual_bind != 0)
> + cpfl_vport->p2p_manual_bind = true;
> + else
> + cpfl_vport->p2p_manual_bind = false;
> +
> + /* Always Tx hairpin queue allocates Tx HW ring */
> + ring_size = RTE_ALIGN(txq->nb_tx_desc * CPFL_P2P_DESC_LEN,
> + CPFL_DMA_MEM_ALIGN);
> + mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_ring", logic_qid,
> + ring_size + CPFL_P2P_RING_BUF,
> + CPFL_RING_BASE_ALIGN,
> + dev->device->numa_node);
> + if (!mz) {
> + PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
> + rte_free(txq->sw_ring);
> + rte_free(txq);
> + return -ENOMEM;
> + }
> +
> + txq->tx_ring_phys_addr = mz->iova;
> + txq->desc_ring = mz->addr;
> + txq->mz = mz;
> +
> + cpfl_tx_hairpin_descq_reset(txq);
> + txq->qtx_tail = hw->hw_addr +
> + cpfl_hw_qtail_get(cpfl_vport-
> >p2p_q_chunks_info.tx_qtail_start,
> + logic_qid, cpfl_vport-
> >p2p_q_chunks_info.tx_qtail_spacing);
> + txq->ops = &def_txq_ops;
> +
> + if (cpfl_vport->p2p_tx_complq == NULL) {
[Liu, Mingxia] In cpfl_rx_hairpin_queue_setup(), "logic_qid" is used to identify whether it is the first time to allocate the "p2p_rx_bufq" buffer.
Can the two checks be unified, using either logic_qid == 0 or p2p_tx_complq/p2p_rx_bufq == NULL in both places?
> + cq = rte_zmalloc_socket("cpfl hairpin cq",
> + sizeof(struct idpf_tx_queue),
> + RTE_CACHE_LINE_SIZE,
> + dev->device->numa_node);
> + if (!cq) {
> + PMD_INIT_LOG(ERR, "Failed to allocate memory for tx
> queue structure");
> + return -ENOMEM;
> + }
> +
> + cq->nb_tx_desc = nb_desc;
> + cq->queue_id = cpfl_hw_qid_get(cpfl_vport-
> >p2p_q_chunks_info.tx_compl_start_qid, 0);
> + cq->port_id = dev->data->port_id;
> +
> + /* Tx completion queue always allocates the HW ring */
> + ring_size = RTE_ALIGN(cq->nb_tx_desc * CPFL_P2P_DESC_LEN,
> + CPFL_DMA_MEM_ALIGN);
> + mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_compl_ring",
> logic_qid,
> + ring_size + CPFL_P2P_RING_BUF,
> + CPFL_RING_BASE_ALIGN,
> + dev->device->numa_node);
> + if (!mz) {
> + PMD_INIT_LOG(ERR, "Failed to reserve DMA memory
> for TX completion queue");
> + rte_free(txq->sw_ring);
> + rte_free(txq);
> + return -ENOMEM;
> + }
> + cq->tx_ring_phys_addr = mz->iova;
> + cq->compl_ring = mz->addr;
> + cq->mz = mz;
> +
> + cpfl_tx_hairpin_complq_reset(cq);
> + cpfl_vport->p2p_tx_complq = cq;
> + }
> +
> + txq->complq = cpfl_vport->p2p_tx_complq;
> +
> + cpfl_vport->nb_p2p_txq++;
> + txq->q_set = true;
> + dev->data->tx_queues[queue_idx] = cpfl_txq;
> +
> + return 0;
> +}
> +
> int
> cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id) { @@ -
> 865,6 +1234,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
> if (vport->rx_vec_allowed) {
> for (i = 0; i < dev->data->nb_rx_queues; i++) {
> cpfl_rxq = dev->data->rx_queues[i];
> + if (cpfl_rxq->hairpin_info.hairpin_q)
> + continue;
> (void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq-
> >base);
> }
> #ifdef CC_AVX512_SUPPORT
> diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h index
> 3a87a1f4b3..5e9f2dada7 100644
> --- a/drivers/net/cpfl/cpfl_rxtx.h
> +++ b/drivers/net/cpfl/cpfl_rxtx.h
> @@ -13,6 +13,7 @@
> #define CPFL_MIN_RING_DESC 32
> #define CPFL_MAX_RING_DESC 4096
> #define CPFL_DMA_MEM_ALIGN 4096
> +#define CPFL_P2P_DESC_LEN 16
> #define CPFL_MAX_HAIRPINQ_RX_2_TX 1
> #define CPFL_MAX_HAIRPINQ_TX_2_RX 1
> #define CPFL_MAX_HAIRPINQ_NB_DESC 1024
> @@ -21,6 +22,10 @@
> #define CPFL_P2P_NB_TX_COMPLQ 1
> #define CPFL_P2P_NB_QUEUE_GRPS 1
> #define CPFL_P2P_QUEUE_GRP_ID 1
> +#define CPFL_P2P_NB_MBUF 4096
> +#define CPFL_P2P_CACHE_SIZE 250
> +#define CPFL_P2P_MBUF_SIZE 2048
> +#define CPFL_P2P_RING_BUF 128
> /* Base address of the HW descriptor ring should be 128B aligned. */
> #define CPFL_RING_BASE_ALIGN 128
>
> @@ -31,12 +36,26 @@
>
> #define CPFL_SUPPORT_CHAIN_NUM 5
>
> +struct cpfl_rxq_hairpin_info {
> + bool hairpin_q; /* if rx queue is a hairpin queue */
> + uint16_t peer_txp;
> + uint16_t peer_txq_id;
> +};
> +
> struct cpfl_rx_queue {
> struct idpf_rx_queue base;
> + struct cpfl_rxq_hairpin_info hairpin_info; };
> +
> +struct cpfl_txq_hairpin_info {
> + bool hairpin_q; /* if tx queue is a hairpin queue */
> + uint16_t peer_rxp;
> + uint16_t peer_rxq_id;
> };
>
> struct cpfl_tx_queue {
> struct idpf_tx_queue base;
> + struct cpfl_txq_hairpin_info hairpin_info;
> };
>
> int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -
> 57,4 +76,11 @@ void cpfl_dev_tx_queue_release(struct rte_eth_dev *dev,
> uint16_t qid); void cpfl_dev_rx_queue_release(struct rte_eth_dev *dev,
> uint16_t qid); void cpfl_set_rx_function(struct rte_eth_dev *dev); void
> cpfl_set_tx_function(struct rte_eth_dev *dev);
> +uint16_t cpfl_hw_qid_get(uint16_t start_qid, uint16_t offset); uint64_t
> +cpfl_hw_qtail_get(uint64_t tail_start, uint16_t offset, uint64_t
> +tail_spacing); int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev,
> uint16_t queue_idx,
> + uint16_t nb_desc, const struct
> rte_eth_hairpin_conf *conf); int
> +cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> + uint16_t nb_desc,
> + const struct rte_eth_hairpin_conf *conf);
> #endif /* _CPFL_RXTX_H_ */
> diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
> b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
> index 5690b17911..d8e9191196 100644
> --- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
> +++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
> @@ -85,6 +85,8 @@ cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)
> cpfl_rxq = dev->data->rx_queues[i];
> default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);
> if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
> + if (cpfl_rxq->hairpin_info.hairpin_q)
> + continue;
> splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq-
> >base);
> ret = splitq_ret && default_ret;
> } else {
> @@ -106,6 +108,8 @@ cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)
>
> for (i = 0; i < dev->data->nb_tx_queues; i++) {
> cpfl_txq = dev->data->tx_queues[i];
> + if (cpfl_txq->hairpin_info.hairpin_q)
> + continue;
> ret = cpfl_tx_vec_queue_default(&cpfl_txq->base);
> if (ret == CPFL_SCALAR_PATH)
> return CPFL_SCALAR_PATH;
> --
> 2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH v3 02/10] net/cpfl: support hairpin queue capbility get
2023-05-19 7:31 ` [PATCH v3 02/10] net/cpfl: support hairpin queue capbility get beilei.xing
@ 2023-05-24 14:30 ` Wu, Jingjing
0 siblings, 0 replies; 164+ messages in thread
From: Wu, Jingjing @ 2023-05-24 14:30 UTC (permalink / raw)
To: Xing, Beilei; +Cc: dev, Liu, Mingxia, Wang, Xiao W
> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Friday, May 19, 2023 3:31 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>
> Subject: [PATCH v3 02/10] net/cpfl: support hairpin queue capbility get
>
> From: Beilei Xing <beilei.xing@intel.com>
>
> This patch adds hairpin_cap_get ops support.
>
> Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/net/cpfl/cpfl_ethdev.c | 13 +++++++++++++
> drivers/net/cpfl/cpfl_rxtx.h | 4 ++++
> 2 files changed, 17 insertions(+)
>
> diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
> index e587155db6..b6fd0b05d0 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.c
> +++ b/drivers/net/cpfl/cpfl_ethdev.c
> @@ -154,6 +154,18 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
> return rte_eth_linkstatus_set(dev, &new_link);
> }
>
> +static int
> +cpfl_hairpin_cap_get(__rte_unused struct rte_eth_dev *dev,
> + struct rte_eth_hairpin_cap *cap)
> +{
> + cap->max_nb_queues = CPFL_MAX_P2P_NB_QUEUES;
> + cap->max_rx_2_tx = CPFL_MAX_HAIRPINQ_RX_2_TX;
> + cap->max_tx_2_rx = CPFL_MAX_HAIRPINQ_TX_2_RX;
> + cap->max_nb_desc = CPFL_MAX_HAIRPINQ_NB_DESC;
> +
Would it be better to check whether the p2p queue group was added successfully, and only then report the hairpin capability?
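A minimal sketch of that idea (illustrative only; the p2p_enabled flag is hypothetical and would have to be set wherever the p2p queue group add succeeds):

static int
cpfl_hairpin_cap_get(struct rte_eth_dev *dev,
                     struct rte_eth_hairpin_cap *cap)
{
        struct cpfl_vport *cpfl_vport = dev->data->dev_private;

        /* Hypothetical flag: only report hairpin capability when the p2p
         * queue group was added successfully at vport init time.
         */
        if (!cpfl_vport->p2p_enabled)
                return -ENOTSUP;

        cap->max_nb_queues = CPFL_MAX_P2P_NB_QUEUES;
        cap->max_rx_2_tx = CPFL_MAX_HAIRPINQ_RX_2_TX;
        cap->max_tx_2_rx = CPFL_MAX_HAIRPINQ_TX_2_RX;
        cap->max_nb_desc = CPFL_MAX_HAIRPINQ_NB_DESC;

        return 0;
}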
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH v3 04/10] net/cpfl: add haipin queue group during vport init
2023-05-19 7:31 ` [PATCH v3 04/10] net/cpfl: add haipin queue group during vport init beilei.xing
@ 2023-05-24 14:38 ` Wu, Jingjing
0 siblings, 0 replies; 164+ messages in thread
From: Wu, Jingjing @ 2023-05-24 14:38 UTC (permalink / raw)
To: Xing, Beilei; +Cc: dev, Liu, Mingxia
> static int
> cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
> {
> @@ -1306,6 +1414,8 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
> struct cpfl_adapter_ext *adapter = param->adapter;
> /* for sending create vport virtchnl msg prepare */
> struct virtchnl2_create_vport create_vport_info;
> + struct virtchnl2_add_queue_groups p2p_queue_grps_info;
> + uint8_t p2p_q_vc_out_info[IDPF_DFLT_MBX_BUF_SIZE] = {0};
> int ret = 0;
>
> dev->dev_ops = &cpfl_eth_dev_ops;
> @@ -1340,8 +1450,28 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void
> *init_params)
> rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
> &dev->data->mac_addrs[0]);
>
> + if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
> + memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
> + ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info,
> p2p_q_vc_out_info);
> + if (ret != 0) {
> + PMD_INIT_LOG(ERR, "Failed to add p2p queue group.");
> + goto err_q_grps_add;
> + }
> + ret = cpfl_p2p_queue_info_init(cpfl_vport,
> + (struct virtchnl2_add_queue_groups
> *)p2p_q_vc_out_info);
> + if (ret != 0) {
> + PMD_INIT_LOG(ERR, "Failed to init p2p queue info.");
> + goto err_p2p_qinfo_init;
If adding the p2p queue group fails, will the device init quit?
I think it would be better to continue initialization, just without the p2p capability.
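A rough sketch of continuing without the capability (illustrative only, not the actual patch; reusing cpfl_p2p_queue_grps_del() for cleanup on the second failure is an assumption):

        if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
                memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
                ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
                if (ret != 0) {
                        /* Warn and keep going: the port just comes up without p2p. */
                        PMD_INIT_LOG(WARNING, "Failed to add p2p queue group, p2p is disabled.");
                        return 0;
                }
                ret = cpfl_p2p_queue_info_init(cpfl_vport,
                                               (struct virtchnl2_add_queue_groups *)p2p_q_vc_out_info);
                if (ret != 0) {
                        PMD_INIT_LOG(WARNING, "Failed to init p2p queue info, p2p is disabled.");
                        cpfl_p2p_queue_grps_del(vport);
                        return 0;
                }
        }

        return 0;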
> + }
> + }
> +
> return 0;
>
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH v3 05/10] net/cpfl: support hairpin queue setup and release
2023-05-19 7:31 ` [PATCH v3 05/10] net/cpfl: support hairpin queue setup and release beilei.xing
2023-05-24 9:01 ` Liu, Mingxia
@ 2023-05-25 3:58 ` Wu, Jingjing
2023-05-26 3:52 ` Xing, Beilei
1 sibling, 1 reply; 164+ messages in thread
From: Wu, Jingjing @ 2023-05-25 3:58 UTC (permalink / raw)
To: Xing, Beilei; +Cc: dev, Liu, Mingxia, Wang, Xiao W
>
> +static int
> +cpfl_rx_hairpin_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,
> + uint16_t logic_qid, uint16_t nb_desc)
> +{
> + struct cpfl_vport *cpfl_vport =
> + (struct cpfl_vport *)dev->data->dev_private;
> + struct idpf_vport *vport = &cpfl_vport->base;
> + struct idpf_adapter *adapter = vport->adapter;
> + struct rte_mempool *mp;
> + char pool_name[RTE_MEMPOOL_NAMESIZE];
> +
> + mp = cpfl_vport->p2p_mp;
> + if (!mp) {
> + snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "p2p_mb_pool_%u",
> + dev->data->port_id);
> + mp = rte_pktmbuf_pool_create(pool_name, CPFL_P2P_NB_MBUF,
> CPFL_P2P_CACHE_SIZE,
> + 0, CPFL_P2P_MBUF_SIZE, dev->device-
> >numa_node);
> + if (!mp) {
> + PMD_INIT_LOG(ERR, "Failed to allocate mbuf pool for p2p");
> + return -ENOMEM;
> + }
> + cpfl_vport->p2p_mp = mp;
> + }
> +
> + bufq->mp = mp;
> + bufq->nb_rx_desc = nb_desc;
> + bufq->queue_id = cpfl_hw_qid_get(cpfl_vport-
> >p2p_q_chunks_info.rx_buf_start_qid, logic_qid);
> + bufq->port_id = dev->data->port_id;
> + bufq->adapter = adapter;
> + bufq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
> +
> + bufq->sw_ring = rte_zmalloc("sw ring",
> + sizeof(struct rte_mbuf *) * nb_desc,
> + RTE_CACHE_LINE_SIZE);
Is sw_ring required in the p2p case? It is never used, right?
Please also check the sw_ring in the Tx queue.
> + if (!bufq->sw_ring) {
> + PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
> + return -ENOMEM;
> + }
> +
> + bufq->q_set = true;
> + bufq->ops = &def_rxq_ops;
> +
> + return 0;
> +}
> +
> +int
> +cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> + uint16_t nb_desc,
> + const struct rte_eth_hairpin_conf *conf)
> +{
> + struct cpfl_vport *cpfl_vport = (struct cpfl_vport *)dev->data->dev_private;
> + struct idpf_vport *vport = &cpfl_vport->base;
> + struct idpf_adapter *adapter_base = vport->adapter;
> + uint16_t logic_qid = cpfl_vport->nb_p2p_rxq;
> + struct cpfl_rxq_hairpin_info *hairpin_info;
> + struct cpfl_rx_queue *cpfl_rxq;
> + struct idpf_rx_queue *bufq1 = NULL;
> + struct idpf_rx_queue *rxq;
> + uint16_t peer_port, peer_q;
> + uint16_t qid;
> + int ret;
> +
> + if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
> + PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin queue.");
> + return -EINVAL;
> + }
> +
> + if (conf->peer_count != 1) {
> + PMD_INIT_LOG(ERR, "Can't support Rx hairpin queue peer count %d",
> conf->peer_count);
> + return -EINVAL;
> + }
> +
> + peer_port = conf->peers[0].port;
> + peer_q = conf->peers[0].queue;
> +
> + if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
> + nb_desc > CPFL_MAX_RING_DESC ||
> + nb_desc < CPFL_MIN_RING_DESC) {
> + PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is invalid",
> nb_desc);
> + return -EINVAL;
> + }
> +
> + /* Free memory if needed */
> + if (dev->data->rx_queues[queue_idx]) {
> + cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
> + dev->data->rx_queues[queue_idx] = NULL;
> + }
> +
> + /* Setup Rx description queue */
> + cpfl_rxq = rte_zmalloc_socket("cpfl hairpin rxq",
> + sizeof(struct cpfl_rx_queue),
> + RTE_CACHE_LINE_SIZE,
> + SOCKET_ID_ANY);
> + if (!cpfl_rxq) {
> + PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data
> structure");
> + return -ENOMEM;
> + }
> +
> + rxq = &cpfl_rxq->base;
> + hairpin_info = &cpfl_rxq->hairpin_info;
> + rxq->nb_rx_desc = nb_desc * 2;
> + rxq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.rx_start_qid,
> logic_qid);
> + rxq->port_id = dev->data->port_id;
> + rxq->adapter = adapter_base;
> + rxq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
> + hairpin_info->hairpin_q = true;
> + hairpin_info->peer_txp = peer_port;
> + hairpin_info->peer_txq_id = peer_q;
> +
> + if (conf->manual_bind != 0)
> + cpfl_vport->p2p_manual_bind = true;
> + else
> + cpfl_vport->p2p_manual_bind = false;
> +
> + /* setup 1 Rx buffer queue for the 1st hairpin rxq */
> + if (logic_qid == 0) {
> + bufq1 = rte_zmalloc_socket("hairpin rx bufq1",
> + sizeof(struct idpf_rx_queue),
> + RTE_CACHE_LINE_SIZE,
> + SOCKET_ID_ANY);
> + if (!bufq1) {
> + PMD_INIT_LOG(ERR, "Failed to allocate memory for hairpin Rx
> buffer queue 1.");
> + ret = -ENOMEM;
> + goto err_alloc_bufq1;
> + }
> + qid = 2 * logic_qid;
Inside this branch (if (logic_qid == 0) { ... }), logic_qid must be zero, right? Then what is the purpose of computing qid = 2 * logic_qid?
> + ret = cpfl_rx_hairpin_bufq_setup(dev, bufq1, qid, nb_desc);
> + if (ret) {
> + PMD_INIT_LOG(ERR, "Failed to setup hairpin Rx buffer queue 1");
> + ret = -EINVAL;
> + goto err_setup_bufq1;
> + }
> + cpfl_vport->p2p_rx_bufq = bufq1;
> + }
> +
> + rxq->bufq1 = cpfl_vport->p2p_rx_bufq;
> + rxq->bufq2 = NULL;
> +
cpfl_vport->p2p_rx_bufq is allocated in this function, but I haven't seen where it is released.
And in cpfl_rx_hairpin_bufq_reset() rxq->bufq1 is set to NULL; will the queue release miss it?
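For illustration only (an assumption, not the actual fix), the shared buffer queue could also be released explicitly once, e.g. at device close, so the NULL assignment in cpfl_rx_hairpin_bufq_reset() cannot leak it:

        if (cpfl_vport->p2p_rx_bufq != NULL) {
                /* Free the shared hairpin Rx buffer queue exactly once. */
                cpfl_rx_split_bufq_release(cpfl_vport->p2p_rx_bufq);
                cpfl_vport->p2p_rx_bufq = NULL;
        }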
> + cpfl_vport->nb_p2p_rxq++;
> + rxq->q_set = true;
> + dev->data->rx_queues[queue_idx] = cpfl_rxq;
> +
> + return 0;
> +
> +err_setup_bufq1:
> + rte_free(bufq1);
> +err_alloc_bufq1:
> + rte_free(rxq);
> +
> + return ret;
> +}
> +
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH v3 07/10] net/cpfl: support hairpin queue start/stop
2023-05-19 7:31 ` [PATCH v3 07/10] net/cpfl: support hairpin queue start/stop beilei.xing
@ 2023-05-25 4:12 ` Wu, Jingjing
0 siblings, 0 replies; 164+ messages in thread
From: Wu, Jingjing @ 2023-05-25 4:12 UTC (permalink / raw)
To: Xing, Beilei; +Cc: dev, Liu, Mingxia, Wang, Xiao W
> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Friday, May 19, 2023 3:31 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>
> Subject: [PATCH v3 07/10] net/cpfl: support hairpin queue start/stop
>
> From: Beilei Xing <beilei.xing@intel.com>
>
> This patch supports Rx/Tx hairpin queue start/stop.
>
> Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/common/idpf/idpf_common_virtchnl.c | 2 +-
> drivers/common/idpf/idpf_common_virtchnl.h | 3 +
> drivers/common/idpf/version.map | 1 +
> drivers/net/cpfl/cpfl_ethdev.c | 41 ++++++
> drivers/net/cpfl/cpfl_rxtx.c | 153 ++++++++++++++++++---
> drivers/net/cpfl/cpfl_rxtx.h | 14 ++
> 6 files changed, 195 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/common/idpf/idpf_common_virtchnl.c
> b/drivers/common/idpf/idpf_common_virtchnl.c
> index 211b44a88e..6455f640da 100644
> --- a/drivers/common/idpf/idpf_common_virtchnl.c
> +++ b/drivers/common/idpf/idpf_common_virtchnl.c
> @@ -733,7 +733,7 @@ idpf_vc_vectors_dealloc(struct idpf_vport *vport)
> return err;
> }
>
> -static int
> +int
> idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
> uint32_t type, bool on)
> {
> diff --git a/drivers/common/idpf/idpf_common_virtchnl.h
> b/drivers/common/idpf/idpf_common_virtchnl.h
> index db83761a5e..9ff5c38c26 100644
> --- a/drivers/common/idpf/idpf_common_virtchnl.h
> +++ b/drivers/common/idpf/idpf_common_virtchnl.h
> @@ -71,6 +71,9 @@ __rte_internal
> int idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info
> *txq_info,
> uint16_t num_qs);
> __rte_internal
> +int idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
> + uint32_t type, bool on);
> +__rte_internal
> int idpf_vc_queue_grps_del(struct idpf_vport *vport,
> uint16_t num_q_grps,
> struct virtchnl2_queue_group_id *qg_ids);
> diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
> index 17e77884ce..25624732b0 100644
> --- a/drivers/common/idpf/version.map
> +++ b/drivers/common/idpf/version.map
> @@ -40,6 +40,7 @@ INTERNAL {
> idpf_vc_cmd_execute;
> idpf_vc_ctlq_post_rx_buffs;
> idpf_vc_ctlq_recv;
> + idpf_vc_ena_dis_one_queue;
> idpf_vc_irq_map_unmap_config;
> idpf_vc_one_msg_read;
> idpf_vc_ptype_info_query;
This change is in the common code; it would be better to split this patch into two.
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH v3 08/10] net/cpfl: enable write back based on ITR expire
2023-05-19 7:31 ` [PATCH v3 08/10] net/cpfl: enable write back based on ITR expire beilei.xing
@ 2023-05-25 4:17 ` Wu, Jingjing
0 siblings, 0 replies; 164+ messages in thread
From: Wu, Jingjing @ 2023-05-25 4:17 UTC (permalink / raw)
To: Xing, Beilei; +Cc: dev, Liu, Mingxia
> idpf_vport_irq_unmap_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
> {
> diff --git a/drivers/common/idpf/idpf_common_device.h
> b/drivers/common/idpf/idpf_common_device.h
> index 112367dae8..f767ea7cec 100644
> --- a/drivers/common/idpf/idpf_common_device.h
> +++ b/drivers/common/idpf/idpf_common_device.h
> @@ -200,5 +200,9 @@ int idpf_vport_info_init(struct idpf_vport *vport,
> struct virtchnl2_create_vport *vport_info);
> __rte_internal
> void idpf_vport_stats_update(struct virtchnl2_vport_stats *oes, struct
> virtchnl2_vport_stats *nes);
> +__rte_internal
> +int idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport,
> + uint32_t *qids,
> + uint16_t nb_rx_queues);
>
> #endif /* _IDPF_COMMON_DEVICE_H_ */
> diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
> index 25624732b0..0729f6b912 100644
> --- a/drivers/common/idpf/version.map
> +++ b/drivers/common/idpf/version.map
> @@ -69,6 +69,7 @@ INTERNAL {
> idpf_vport_info_init;
> idpf_vport_init;
> idpf_vport_irq_map_config;
> + idpf_vport_irq_map_config_by_qids;
> idpf_vport_irq_unmap_config;
> idpf_vport_rss_config;
> idpf_vport_stats_update;
Same here: split the common change from the net one?
> diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
> index c2ab0690fc..3b480178c0 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.c
> +++ b/drivers/net/cpfl/cpfl_ethdev.c
> @@ -730,11 +730,22 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
> static int
> cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
> {
> + uint32_t qids[CPFL_MAX_P2P_NB_QUEUES + IDPF_DEFAULT_RXQ_NUM] = {0};
> struct cpfl_vport *cpfl_vport = dev->data->dev_private;
> struct idpf_vport *vport = &cpfl_vport->base;
> uint16_t nb_rx_queues = dev->data->nb_rx_queues;
> + struct cpfl_rx_queue *cpfl_rxq;
> + int i;
>
> - return idpf_vport_irq_map_config(vport, nb_rx_queues);
> + for (i = 0; i < nb_rx_queues; i++) {
> + cpfl_rxq = dev->data->rx_queues[i];
> + if (cpfl_rxq->hairpin_info.hairpin_q)
> + qids[i] = cpfl_hw_qid_get(cpfl_vport-
> >p2p_q_chunks_info.rx_start_qid,
> + (i - cpfl_vport->nb_data_rxq));
> + else
> + qids[i] = cpfl_hw_qid_get(vport->chunks_info.rx_start_qid, i);
Looks like cpfl_hw_qid_get is used across files; how about defining it as an inline function or a macro in the header file?
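Something like the following static inline helpers in cpfl_rxtx.h would work (sketch; the bodies match the helpers added by this patch):

static inline uint16_t
cpfl_hw_qid_get(uint16_t start_qid, uint16_t offset)
{
        return start_qid + offset;
}

static inline uint64_t
cpfl_hw_qtail_get(uint64_t tail_start, uint16_t offset, uint64_t tail_spacing)
{
        return tail_start + offset * tail_spacing;
}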
> + }
> + return idpf_vport_irq_map_config_by_qids(vport, qids, nb_rx_queues);
> }
>
> /* Update hairpin_info for dev's tx hairpin queue */
> --
> 2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH v3 09/10] net/cpfl: support peer ports get
2023-05-19 7:31 ` [PATCH v3 09/10] net/cpfl: support peer ports get beilei.xing
@ 2023-05-25 5:26 ` Wu, Jingjing
0 siblings, 0 replies; 164+ messages in thread
From: Wu, Jingjing @ 2023-05-25 5:26 UTC (permalink / raw)
To: Xing, Beilei; +Cc: dev, Liu, Mingxia, Wang, Xiao W
> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Friday, May 19, 2023 3:31 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>
> Subject: [PATCH v3 09/10] net/cpfl: support peer ports get
>
> From: Beilei Xing <beilei.xing@intel.com>
>
> This patch supports get hairpin peer ports.
>
> Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/net/cpfl/cpfl_ethdev.c | 34 ++++++++++++++++++++++++++++++++++
> 1 file changed, 34 insertions(+)
>
> diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
> index 3b480178c0..59c7e75d2a 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.c
> +++ b/drivers/net/cpfl/cpfl_ethdev.c
> @@ -1069,6 +1069,39 @@ cpfl_dev_close(struct rte_eth_dev *dev)
> return 0;
> }
>
> +static int
> +cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
> + __rte_unused size_t len, uint32_t tx)
> +{
The len parameter gives the length of the peer_ports array. You should use it, and also check whether peer_ports is NULL; otherwise this can cause an invalid access.
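A sketch of the suggested checks on the Tx branch (illustrative only; the NULL/len guards and the j < len bound are the additions, and the Rx branch would need the same):

        if (peer_ports == NULL || len == 0)
                return -EINVAL;

        if (tx > 0) {
                for (i = cpfl_vport->nb_data_txq, j = 0;
                     i < dev->data->nb_tx_queues && j < len; i++, j++) {
                        txq = dev->data->tx_queues[i];
                        if (txq == NULL)
                                return -EINVAL;
                        cpfl_txq = (struct cpfl_tx_queue *)txq;
                        peer_ports[j] = cpfl_txq->hairpin_info.peer_rxp;
                }
        }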
> + struct cpfl_vport *cpfl_vport =
> + (struct cpfl_vport *)dev->data->dev_private;
> + struct idpf_tx_queue *txq;
> + struct idpf_rx_queue *rxq;
> + struct cpfl_tx_queue *cpfl_txq;
> + struct cpfl_rx_queue *cpfl_rxq;
> + int i, j;
> +
> + if (tx > 0) {
> + for (i = cpfl_vport->nb_data_txq, j = 0; i < dev->data->nb_tx_queues; i++,
> j++) {
> + txq = dev->data->tx_queues[i];
> + if (txq == NULL)
> + return -EINVAL;
> + cpfl_txq = (struct cpfl_tx_queue *)txq;
> + peer_ports[j] = cpfl_txq->hairpin_info.peer_rxp;
> + }
> + } else if (tx == 0) {
> + for (i = cpfl_vport->nb_data_rxq, j = 0; i < dev->data->nb_rx_queues; i++,
> j++) {
> + rxq = dev->data->rx_queues[i];
> + if (rxq == NULL)
> + return -EINVAL;
> + cpfl_rxq = (struct cpfl_rx_queue *)rxq;
> + peer_ports[j] = cpfl_rxq->hairpin_info.peer_txp;
> + }
> + }
> +
> + return j;
> +}
> +
> static const struct eth_dev_ops cpfl_eth_dev_ops = {
> .dev_configure = cpfl_dev_configure,
> .dev_close = cpfl_dev_close,
> @@ -1098,6 +1131,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
> .hairpin_cap_get = cpfl_hairpin_cap_get,
> .rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
> .tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
> + .hairpin_get_peer_ports = cpfl_hairpin_get_peer_ports,
> };
>
> static int
> --
> 2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH v3 05/10] net/cpfl: support hairpin queue setup and release
2023-05-24 9:01 ` Liu, Mingxia
@ 2023-05-26 3:46 ` Xing, Beilei
0 siblings, 0 replies; 164+ messages in thread
From: Xing, Beilei @ 2023-05-26 3:46 UTC (permalink / raw)
To: Liu, Mingxia, Wu, Jingjing; +Cc: dev, Wang, Xiao W
> -----Original Message-----
> From: Liu, Mingxia <mingxia.liu@intel.com>
> Sent: Wednesday, May 24, 2023 5:02 PM
> To: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Wang, Xiao W <xiao.w.wang@intel.com>
> Subject: RE: [PATCH v3 05/10] net/cpfl: support hairpin queue setup and
> release
>
> > +cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> > + uint16_t nb_desc,
> > + const struct rte_eth_hairpin_conf *conf) {
> > + struct cpfl_vport *cpfl_vport =
> > + (struct cpfl_vport *)dev->data->dev_private;
> > +
> > + struct idpf_vport *vport = &cpfl_vport->base;
> > + struct idpf_adapter *adapter_base = vport->adapter;
> > + uint16_t logic_qid = cpfl_vport->nb_p2p_txq;
> > + struct cpfl_txq_hairpin_info *hairpin_info;
> > + struct idpf_hw *hw = &adapter_base->hw;
> > + struct cpfl_tx_queue *cpfl_txq;
> > + struct idpf_tx_queue *txq, *cq;
> > + const struct rte_memzone *mz;
> > + uint32_t ring_size;
> > + uint16_t peer_port, peer_q;
> > +
> > + if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
> > + PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin
> > queue.");
> > + return -EINVAL;
> > + }
> > +
> > + if (conf->peer_count != 1) {
> > + PMD_INIT_LOG(ERR, "Can't support Tx hairpin queue peer
> > count %d", conf->peer_count);
> > + return -EINVAL;
> > + }
> > +
> > + peer_port = conf->peers[0].port;
> > + peer_q = conf->peers[0].queue;
> > +
> > + if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
> > + nb_desc > CPFL_MAX_RING_DESC ||
> > + nb_desc < CPFL_MIN_RING_DESC) {
> > + PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is
> > invalid",
> > + nb_desc);
> > + return -EINVAL;
> > + }
> > +
> > + /* Free memory if needed. */
> > + if (dev->data->tx_queues[queue_idx]) {
> > + cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
> > + dev->data->tx_queues[queue_idx] = NULL;
> > + }
> > +
> > + /* Allocate the TX queue data structure. */
> > + cpfl_txq = rte_zmalloc_socket("cpfl hairpin txq",
> > + sizeof(struct cpfl_tx_queue),
> > + RTE_CACHE_LINE_SIZE,
> > + SOCKET_ID_ANY);
> > + if (!cpfl_txq) {
> > + PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue
> > structure");
> > + return -ENOMEM;
> > + }
> > +
> > + txq = &cpfl_txq->base;
> > + hairpin_info = &cpfl_txq->hairpin_info;
> > + /* Txq ring length should be 2 times of Tx completion queue size. */
> > + txq->nb_tx_desc = nb_desc * 2;
> > + txq->queue_id = cpfl_hw_qid_get(cpfl_vport-
> > >p2p_q_chunks_info.tx_start_qid, logic_qid);
> > + txq->port_id = dev->data->port_id;
> > + hairpin_info->hairpin_q = true;
> > + hairpin_info->peer_rxp = peer_port;
> > + hairpin_info->peer_rxq_id = peer_q;
> > +
> > + if (conf->manual_bind != 0)
> > + cpfl_vport->p2p_manual_bind = true;
> > + else
> > + cpfl_vport->p2p_manual_bind = false;
> > +
> > + /* Always Tx hairpin queue allocates Tx HW ring */
> > + ring_size = RTE_ALIGN(txq->nb_tx_desc * CPFL_P2P_DESC_LEN,
> > + CPFL_DMA_MEM_ALIGN);
> > + mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_ring", logic_qid,
> > + ring_size + CPFL_P2P_RING_BUF,
> > + CPFL_RING_BASE_ALIGN,
> > + dev->device->numa_node);
> > + if (!mz) {
> > + PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
> > + rte_free(txq->sw_ring);
> > + rte_free(txq);
> > + return -ENOMEM;
> > + }
> > +
> > + txq->tx_ring_phys_addr = mz->iova;
> > + txq->desc_ring = mz->addr;
> > + txq->mz = mz;
> > +
> > + cpfl_tx_hairpin_descq_reset(txq);
> > + txq->qtx_tail = hw->hw_addr +
> > + cpfl_hw_qtail_get(cpfl_vport-
> > >p2p_q_chunks_info.tx_qtail_start,
> > + logic_qid, cpfl_vport-
> > >p2p_q_chunks_info.tx_qtail_spacing);
> > + txq->ops = &def_txq_ops;
> > +
> > + if (cpfl_vport->p2p_tx_complq == NULL) {
> [Liu, Mingxia] In cpfl_rx_hairpin_queue_setup(), "logic_qid" is used to identify
> if it is the first time to allocate "p2p_rx_bufq" buffer, Can it be unified, using
> logic_qid == 0 or p2p_tx_complq/ p2p_rx_bufq == NULL ?
>
>
Yes, thanks for the catch.
>
> > -----Original Message-----
> > From: Xing, Beilei <beilei.xing@intel.com>
> > Sent: Friday, May 19, 2023 3:31 PM
> > To: Wu, Jingjing <jingjing.wu@intel.com>
> > Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> > <beilei.xing@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>
> > Subject: [PATCH v3 05/10] net/cpfl: support hairpin queue setup and release
> >
> > From: Beilei Xing <beilei.xing@intel.com>
> >
> > Support hairpin Rx/Tx queue setup and release.
> >
> > Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> > Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > ---
> > drivers/net/cpfl/cpfl_ethdev.c | 6 +
> > drivers/net/cpfl/cpfl_ethdev.h | 12 +
> > drivers/net/cpfl/cpfl_rxtx.c | 373 +++++++++++++++++++++++-
> > drivers/net/cpfl/cpfl_rxtx.h | 26 ++
> > drivers/net/cpfl/cpfl_rxtx_vec_common.h | 4 +
> > 5 files changed, 420 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
> index
> > 8e471d2a9b..03813716ce 100644
> > --- a/drivers/net/cpfl/cpfl_ethdev.c
> > +++ b/drivers/net/cpfl/cpfl_ethdev.c
> > @@ -874,6 +874,10 @@ cpfl_dev_close(struct rte_eth_dev *dev)
> > struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport-
> > >adapter);
> >
> > cpfl_dev_stop(dev);
> > + if (cpfl_vport->p2p_mp) {
> > + rte_mempool_free(cpfl_vport->p2p_mp);
> > + cpfl_vport->p2p_mp = NULL;
> > + }
> >
> > if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
> > cpfl_p2p_queue_grps_del(vport);
> > @@ -916,6 +920,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
> > .xstats_get_names = cpfl_dev_xstats_get_names,
> > .xstats_reset = cpfl_dev_xstats_reset,
> > .hairpin_cap_get = cpfl_hairpin_cap_get,
> > + .rx_hairpin_queue_setup =
> cpfl_rx_hairpin_queue_setup,
> > + .tx_hairpin_queue_setup =
> cpfl_tx_hairpin_queue_setup,
> > };
> >
> > static int
> > diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
> index
> > 65c9a195b2..a48344299c 100644
> > --- a/drivers/net/cpfl/cpfl_ethdev.h
> > +++ b/drivers/net/cpfl/cpfl_ethdev.h
> > @@ -89,6 +89,18 @@ struct p2p_queue_chunks_info { struct cpfl_vport {
> > struct idpf_vport base;
> > struct p2p_queue_chunks_info p2p_q_chunks_info;
> > +
> > + struct rte_mempool *p2p_mp;
> > +
> > + uint16_t nb_data_rxq;
> > + uint16_t nb_data_txq;
> > + uint16_t nb_p2p_rxq;
> > + uint16_t nb_p2p_txq;
> > +
> > + struct idpf_rx_queue *p2p_rx_bufq;
> > + struct idpf_tx_queue *p2p_tx_complq;
> > + bool p2p_manual_bind;
> > +
> > };
> >
> > struct cpfl_adapter_ext {
> > diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c index
> > 04a51b8d15..333a399e73 100644
> > --- a/drivers/net/cpfl/cpfl_rxtx.c
> > +++ b/drivers/net/cpfl/cpfl_rxtx.c
> > @@ -10,6 +10,79 @@
> > #include "cpfl_rxtx.h"
> > #include "cpfl_rxtx_vec_common.h"
> >
> > +uint16_t
> > +cpfl_hw_qid_get(uint16_t start_qid, uint16_t offset) {
> > + return start_qid + offset;
> > +}
> > +
> > +uint64_t
> > +cpfl_hw_qtail_get(uint64_t tail_start, uint16_t offset, uint64_t
> > +tail_spacing) {
> > + return tail_start + offset * tail_spacing; }
> > +
> > +static inline void
> > +cpfl_tx_hairpin_descq_reset(struct idpf_tx_queue *txq) {
> > + uint32_t i, size;
> > +
> > + if (!txq) {
> > + PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
> > + return;
> > + }
> > +
> > + size = txq->nb_tx_desc * CPFL_P2P_DESC_LEN;
> > + for (i = 0; i < size; i++)
> > + ((volatile char *)txq->desc_ring)[i] = 0; }
> > +
> > +static inline void
> > +cpfl_tx_hairpin_complq_reset(struct idpf_tx_queue *cq) {
> > + uint32_t i, size;
> > +
> > + if (!cq) {
> > + PMD_DRV_LOG(DEBUG, "Pointer to complq is NULL");
> > + return;
> > + }
> > +
> > + size = cq->nb_tx_desc * CPFL_P2P_DESC_LEN;
> > + for (i = 0; i < size; i++)
> > + ((volatile char *)cq->compl_ring)[i] = 0; }
> > +
> > +static inline void
> > +cpfl_rx_hairpin_descq_reset(struct idpf_rx_queue *rxq) {
> > + uint16_t len;
> > + uint32_t i;
> > +
> > + if (!rxq)
> > + return;
> > +
> > + len = rxq->nb_rx_desc;
> > + for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
> > + ((volatile char *)rxq->rx_ring)[i] = 0; }
> > +
> > +static inline void
> > +cpfl_rx_hairpin_bufq_reset(struct idpf_rx_queue *rxbq) {
> > + uint16_t len;
> > + uint32_t i;
> > +
> > + if (!rxbq)
> > + return;
> > +
> > + len = rxbq->nb_rx_desc;
> > + for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
> > + ((volatile char *)rxbq->rx_ring)[i] = 0;
> > +
> > + rxbq->bufq1 = NULL;
> > + rxbq->bufq2 = NULL;
> > +}
> > +
> > static uint64_t
> > cpfl_rx_offload_convert(uint64_t offload) { @@ -234,7 +307,10 @@
> > cpfl_rx_queue_release(void *rxq)
> >
> > /* Split queue */
> > if (!q->adapter->is_rx_singleq) {
> > - if (q->bufq2)
> > + /* the mz is shared between Tx/Rx hairpin, let Rx_release
> > + * free the buf, q->bufq1->mz and q->mz.
> > + */
> > + if (!cpfl_rxq->hairpin_info.hairpin_q && q->bufq2)
> > cpfl_rx_split_bufq_release(q->bufq2);
> >
> > if (q->bufq1)
> > @@ -385,6 +461,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev,
> uint16_t
> > queue_idx,
> > }
> > }
> >
> > + cpfl_vport->nb_data_rxq++;
> > rxq->q_set = true;
> > dev->data->rx_queues[queue_idx] = cpfl_rxq;
> >
> > @@ -548,6 +625,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev,
> uint16_t
> > queue_idx,
> > txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
> > queue_idx * vport->chunks_info.tx_qtail_spacing);
> > txq->ops = &def_txq_ops;
> > + cpfl_vport->nb_data_txq++;
> > txq->q_set = true;
> > dev->data->tx_queues[queue_idx] = cpfl_txq;
> >
> > @@ -562,6 +640,297 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev,
> > uint16_t queue_idx,
> > return ret;
> > }
> >
> > +static int
> > +cpfl_rx_hairpin_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue
> > *bufq,
> > + uint16_t logic_qid, uint16_t nb_desc) {
> > + struct cpfl_vport *cpfl_vport =
> > + (struct cpfl_vport *)dev->data->dev_private;
> > + struct idpf_vport *vport = &cpfl_vport->base;
> > + struct idpf_adapter *adapter = vport->adapter;
> > + struct rte_mempool *mp;
> > + char pool_name[RTE_MEMPOOL_NAMESIZE];
> > +
> > + mp = cpfl_vport->p2p_mp;
> > + if (!mp) {
> > + snprintf(pool_name, RTE_MEMPOOL_NAMESIZE,
> > "p2p_mb_pool_%u",
> > + dev->data->port_id);
> > + mp = rte_pktmbuf_pool_create(pool_name,
> > CPFL_P2P_NB_MBUF, CPFL_P2P_CACHE_SIZE,
> > + 0, CPFL_P2P_MBUF_SIZE, dev-
> > >device->numa_node);
> > + if (!mp) {
> > + PMD_INIT_LOG(ERR, "Failed to allocate mbuf pool for
> > p2p");
> > + return -ENOMEM;
> > + }
> > + cpfl_vport->p2p_mp = mp;
> > + }
> > +
> > + bufq->mp = mp;
> > + bufq->nb_rx_desc = nb_desc;
> > + bufq->queue_id = cpfl_hw_qid_get(cpfl_vport-
> > >p2p_q_chunks_info.rx_buf_start_qid, logic_qid);
> > + bufq->port_id = dev->data->port_id;
> > + bufq->adapter = adapter;
> > + bufq->rx_buf_len = CPFL_P2P_MBUF_SIZE -
> > RTE_PKTMBUF_HEADROOM;
> > +
> > + bufq->sw_ring = rte_zmalloc("sw ring",
> > + sizeof(struct rte_mbuf *) * nb_desc,
> > + RTE_CACHE_LINE_SIZE);
> > + if (!bufq->sw_ring) {
> > + PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
> > + return -ENOMEM;
> > + }
> > +
> > + bufq->q_set = true;
> > + bufq->ops = &def_rxq_ops;
> > +
> > + return 0;
> > +}
> > +
> > +int
> > +cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> > + uint16_t nb_desc,
> > + const struct rte_eth_hairpin_conf *conf) {
> > + struct cpfl_vport *cpfl_vport = (struct cpfl_vport *)dev->data-
> > >dev_private;
> > + struct idpf_vport *vport = &cpfl_vport->base;
> > + struct idpf_adapter *adapter_base = vport->adapter;
> > + uint16_t logic_qid = cpfl_vport->nb_p2p_rxq;
> > + struct cpfl_rxq_hairpin_info *hairpin_info;
> > + struct cpfl_rx_queue *cpfl_rxq;
> > + struct idpf_rx_queue *bufq1 = NULL;
> > + struct idpf_rx_queue *rxq;
> > + uint16_t peer_port, peer_q;
> > + uint16_t qid;
> > + int ret;
> > +
> > + if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
> > + PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin
> > queue.");
> > + return -EINVAL;
> > + }
> > +
> > + if (conf->peer_count != 1) {
> > + PMD_INIT_LOG(ERR, "Can't support Rx hairpin queue peer
> > count %d", conf->peer_count);
> > + return -EINVAL;
> > + }
> > +
> > + peer_port = conf->peers[0].port;
> > + peer_q = conf->peers[0].queue;
> > +
> > + if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
> > + nb_desc > CPFL_MAX_RING_DESC ||
> > + nb_desc < CPFL_MIN_RING_DESC) {
> > + PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is
> > invalid", nb_desc);
> > + return -EINVAL;
> > + }
> > +
> > + /* Free memory if needed */
> > + if (dev->data->rx_queues[queue_idx]) {
> > + cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
> > + dev->data->rx_queues[queue_idx] = NULL;
> > + }
> > +
> > + /* Setup Rx description queue */
> > + cpfl_rxq = rte_zmalloc_socket("cpfl hairpin rxq",
> > + sizeof(struct cpfl_rx_queue),
> > + RTE_CACHE_LINE_SIZE,
> > + SOCKET_ID_ANY);
> > + if (!cpfl_rxq) {
> > + PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue
> > data structure");
> > + return -ENOMEM;
> > + }
> > +
> > + rxq = &cpfl_rxq->base;
> > + hairpin_info = &cpfl_rxq->hairpin_info;
> > + rxq->nb_rx_desc = nb_desc * 2;
> > + rxq->queue_id = cpfl_hw_qid_get(cpfl_vport-
> > >p2p_q_chunks_info.rx_start_qid, logic_qid);
> > + rxq->port_id = dev->data->port_id;
> > + rxq->adapter = adapter_base;
> > + rxq->rx_buf_len = CPFL_P2P_MBUF_SIZE -
> RTE_PKTMBUF_HEADROOM;
> > + hairpin_info->hairpin_q = true;
> > + hairpin_info->peer_txp = peer_port;
> > + hairpin_info->peer_txq_id = peer_q;
> > +
> > + if (conf->manual_bind != 0)
> > + cpfl_vport->p2p_manual_bind = true;
> > + else
> > + cpfl_vport->p2p_manual_bind = false;
> > +
> > + /* setup 1 Rx buffer queue for the 1st hairpin rxq */
> > + if (logic_qid == 0) {
> > + bufq1 = rte_zmalloc_socket("hairpin rx bufq1",
> > + sizeof(struct idpf_rx_queue),
> > + RTE_CACHE_LINE_SIZE,
> > + SOCKET_ID_ANY);
> > + if (!bufq1) {
> > + PMD_INIT_LOG(ERR, "Failed to allocate memory for
> > hairpin Rx buffer queue 1.");
> > + ret = -ENOMEM;
> > + goto err_alloc_bufq1;
> > + }
> > + qid = 2 * logic_qid;
> > + ret = cpfl_rx_hairpin_bufq_setup(dev, bufq1, qid, nb_desc);
> > + if (ret) {
> > + PMD_INIT_LOG(ERR, "Failed to setup hairpin Rx buffer
> > queue 1");
> > + ret = -EINVAL;
> > + goto err_setup_bufq1;
> > + }
> > + cpfl_vport->p2p_rx_bufq = bufq1;
> > + }
> > +
> > + rxq->bufq1 = cpfl_vport->p2p_rx_bufq;
> > + rxq->bufq2 = NULL;
> > +
> > + cpfl_vport->nb_p2p_rxq++;
> > + rxq->q_set = true;
> > + dev->data->rx_queues[queue_idx] = cpfl_rxq;
> > +
> > + return 0;
> > +
> > +err_setup_bufq1:
> > + rte_free(bufq1);
> > +err_alloc_bufq1:
> > + rte_free(rxq);
> > +
> > + return ret;
> > +}
> > +
> > +int
> > +cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> > + uint16_t nb_desc,
> > + const struct rte_eth_hairpin_conf *conf) {
> > + struct cpfl_vport *cpfl_vport =
> > + (struct cpfl_vport *)dev->data->dev_private;
> > +
> > + struct idpf_vport *vport = &cpfl_vport->base;
> > + struct idpf_adapter *adapter_base = vport->adapter;
> > + uint16_t logic_qid = cpfl_vport->nb_p2p_txq;
> > + struct cpfl_txq_hairpin_info *hairpin_info;
> > + struct idpf_hw *hw = &adapter_base->hw;
> > + struct cpfl_tx_queue *cpfl_txq;
> > + struct idpf_tx_queue *txq, *cq;
> > + const struct rte_memzone *mz;
> > + uint32_t ring_size;
> > + uint16_t peer_port, peer_q;
> > +
> > + if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
> > + PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin
> > queue.");
> > + return -EINVAL;
> > + }
> > +
> > + if (conf->peer_count != 1) {
> > + PMD_INIT_LOG(ERR, "Can't support Tx hairpin queue peer
> > count %d", conf->peer_count);
> > + return -EINVAL;
> > + }
> > +
> > + peer_port = conf->peers[0].port;
> > + peer_q = conf->peers[0].queue;
> > +
> > + if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
> > + nb_desc > CPFL_MAX_RING_DESC ||
> > + nb_desc < CPFL_MIN_RING_DESC) {
> > + PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is
> > invalid",
> > + nb_desc);
> > + return -EINVAL;
> > + }
> > +
> > + /* Free memory if needed. */
> > + if (dev->data->tx_queues[queue_idx]) {
> > + cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
> > + dev->data->tx_queues[queue_idx] = NULL;
> > + }
> > +
> > + /* Allocate the TX queue data structure. */
> > + cpfl_txq = rte_zmalloc_socket("cpfl hairpin txq",
> > + sizeof(struct cpfl_tx_queue),
> > + RTE_CACHE_LINE_SIZE,
> > + SOCKET_ID_ANY);
> > + if (!cpfl_txq) {
> > + PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue
> > structure");
> > + return -ENOMEM;
> > + }
> > +
> > + txq = &cpfl_txq->base;
> > + hairpin_info = &cpfl_txq->hairpin_info;
> > + /* Txq ring length should be 2 times of Tx completion queue size. */
> > + txq->nb_tx_desc = nb_desc * 2;
> > + txq->queue_id = cpfl_hw_qid_get(cpfl_vport-
> > >p2p_q_chunks_info.tx_start_qid, logic_qid);
> > + txq->port_id = dev->data->port_id;
> > + hairpin_info->hairpin_q = true;
> > + hairpin_info->peer_rxp = peer_port;
> > + hairpin_info->peer_rxq_id = peer_q;
> > +
> > + if (conf->manual_bind != 0)
> > + cpfl_vport->p2p_manual_bind = true;
> > + else
> > + cpfl_vport->p2p_manual_bind = false;
> > +
> > + /* Always Tx hairpin queue allocates Tx HW ring */
> > + ring_size = RTE_ALIGN(txq->nb_tx_desc * CPFL_P2P_DESC_LEN,
> > + CPFL_DMA_MEM_ALIGN);
> > + mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_ring", logic_qid,
> > + ring_size + CPFL_P2P_RING_BUF,
> > + CPFL_RING_BASE_ALIGN,
> > + dev->device->numa_node);
> > + if (!mz) {
> > + PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
> > + rte_free(txq->sw_ring);
> > + rte_free(txq);
> > + return -ENOMEM;
> > + }
> > +
> > + txq->tx_ring_phys_addr = mz->iova;
> > + txq->desc_ring = mz->addr;
> > + txq->mz = mz;
> > +
> > + cpfl_tx_hairpin_descq_reset(txq);
> > + txq->qtx_tail = hw->hw_addr +
> > + cpfl_hw_qtail_get(cpfl_vport-
> > >p2p_q_chunks_info.tx_qtail_start,
> > + logic_qid, cpfl_vport-
> > >p2p_q_chunks_info.tx_qtail_spacing);
> > + txq->ops = &def_txq_ops;
> > +
> > + if (cpfl_vport->p2p_tx_complq == NULL) {
> [Liu, Mingxia] In cpfl_rx_hairpin_queue_setup(), "logic_qid" is used to identify
> if it is the first time to allocate "p2p_rx_bufq" buffer,
> Can it be unified, using logic_qid == 0 or p2p_tx_complq/ p2p_rx_bufq ==
> NULL ?
> > + cq = rte_zmalloc_socket("cpfl hairpin cq",
> > + sizeof(struct idpf_tx_queue),
> > + RTE_CACHE_LINE_SIZE,
> > + dev->device->numa_node);
> > + if (!cq) {
> > + PMD_INIT_LOG(ERR, "Failed to allocate memory for tx
> > queue structure");
> > + return -ENOMEM;
> > + }
> > +
> > + cq->nb_tx_desc = nb_desc;
> > + cq->queue_id = cpfl_hw_qid_get(cpfl_vport-
> > >p2p_q_chunks_info.tx_compl_start_qid, 0);
> > + cq->port_id = dev->data->port_id;
> > +
> > + /* Tx completion queue always allocates the HW ring */
> > + ring_size = RTE_ALIGN(cq->nb_tx_desc * CPFL_P2P_DESC_LEN,
> > + CPFL_DMA_MEM_ALIGN);
> > + mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_compl_ring",
> > logic_qid,
> > + ring_size + CPFL_P2P_RING_BUF,
> > + CPFL_RING_BASE_ALIGN,
> > + dev->device->numa_node);
> > + if (!mz) {
> > + PMD_INIT_LOG(ERR, "Failed to reserve DMA memory
> > for TX completion queue");
> > + rte_free(txq->sw_ring);
> > + rte_free(txq);
> > + return -ENOMEM;
> > + }
> > + cq->tx_ring_phys_addr = mz->iova;
> > + cq->compl_ring = mz->addr;
> > + cq->mz = mz;
> > +
> > + cpfl_tx_hairpin_complq_reset(cq);
> > + cpfl_vport->p2p_tx_complq = cq;
> > + }
> > +
> > + txq->complq = cpfl_vport->p2p_tx_complq;
> > +
> > + cpfl_vport->nb_p2p_txq++;
> > + txq->q_set = true;
> > + dev->data->tx_queues[queue_idx] = cpfl_txq;
> > +
> > + return 0;
> > +}
> > +
> > int
> > cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id) { @@ -
> > 865,6 +1234,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
> > if (vport->rx_vec_allowed) {
> > for (i = 0; i < dev->data->nb_rx_queues; i++) {
> > cpfl_rxq = dev->data->rx_queues[i];
> > + if (cpfl_rxq->hairpin_info.hairpin_q)
> > + continue;
> > (void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq-
> > >base);
> > }
> > #ifdef CC_AVX512_SUPPORT
> > diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h index
> > 3a87a1f4b3..5e9f2dada7 100644
> > --- a/drivers/net/cpfl/cpfl_rxtx.h
> > +++ b/drivers/net/cpfl/cpfl_rxtx.h
> > @@ -13,6 +13,7 @@
> > #define CPFL_MIN_RING_DESC 32
> > #define CPFL_MAX_RING_DESC 4096
> > #define CPFL_DMA_MEM_ALIGN 4096
> > +#define CPFL_P2P_DESC_LEN 16
> > #define CPFL_MAX_HAIRPINQ_RX_2_TX 1
> > #define CPFL_MAX_HAIRPINQ_TX_2_RX 1
> > #define CPFL_MAX_HAIRPINQ_NB_DESC 1024
> > @@ -21,6 +22,10 @@
> > #define CPFL_P2P_NB_TX_COMPLQ 1
> > #define CPFL_P2P_NB_QUEUE_GRPS 1
> > #define CPFL_P2P_QUEUE_GRP_ID 1
> > +#define CPFL_P2P_NB_MBUF 4096
> > +#define CPFL_P2P_CACHE_SIZE 250
> > +#define CPFL_P2P_MBUF_SIZE 2048
> > +#define CPFL_P2P_RING_BUF 128
> > /* Base address of the HW descriptor ring should be 128B aligned. */
> > #define CPFL_RING_BASE_ALIGN 128
> >
> > @@ -31,12 +36,26 @@
> >
> > #define CPFL_SUPPORT_CHAIN_NUM 5
> >
> > +struct cpfl_rxq_hairpin_info {
> > + bool hairpin_q; /* if rx queue is a hairpin queue */
> > + uint16_t peer_txp;
> > + uint16_t peer_txq_id;
> > +};
> > +
> > struct cpfl_rx_queue {
> > struct idpf_rx_queue base;
> > + struct cpfl_rxq_hairpin_info hairpin_info; };
> > +
> > +struct cpfl_txq_hairpin_info {
> > + bool hairpin_q; /* if tx queue is a hairpin queue */
> > + uint16_t peer_rxp;
> > + uint16_t peer_rxq_id;
> > };
> >
> > struct cpfl_tx_queue {
> > struct idpf_tx_queue base;
> > + struct cpfl_txq_hairpin_info hairpin_info;
> > };
> >
> > int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -
> > 57,4 +76,11 @@ void cpfl_dev_tx_queue_release(struct rte_eth_dev *dev,
> > uint16_t qid); void cpfl_dev_rx_queue_release(struct rte_eth_dev *dev,
> > uint16_t qid); void cpfl_set_rx_function(struct rte_eth_dev *dev); void
> > cpfl_set_tx_function(struct rte_eth_dev *dev);
> > +uint16_t cpfl_hw_qid_get(uint16_t start_qid, uint16_t offset); uint64_t
> > +cpfl_hw_qtail_get(uint64_t tail_start, uint16_t offset, uint64_t
> > +tail_spacing); int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev,
> > uint16_t queue_idx,
> > + uint16_t nb_desc, const struct
> > rte_eth_hairpin_conf *conf); int
> > +cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> > + uint16_t nb_desc,
> > + const struct rte_eth_hairpin_conf *conf);
> > #endif /* _CPFL_RXTX_H_ */
> > diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
> > b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
> > index 5690b17911..d8e9191196 100644
> > --- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
> > +++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
> > @@ -85,6 +85,8 @@ cpfl_rx_vec_dev_check_default(struct rte_eth_dev
> *dev)
> > cpfl_rxq = dev->data->rx_queues[i];
> > default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);
> > if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
> > + if (cpfl_rxq->hairpin_info.hairpin_q)
> > + continue;
> > splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq-
> > >base);
> > ret = splitq_ret && default_ret;
> > } else {
> > @@ -106,6 +108,8 @@ cpfl_tx_vec_dev_check_default(struct rte_eth_dev
> *dev)
> >
> > for (i = 0; i < dev->data->nb_tx_queues; i++) {
> > cpfl_txq = dev->data->tx_queues[i];
> > + if (cpfl_txq->hairpin_info.hairpin_q)
> > + continue;
> > ret = cpfl_tx_vec_queue_default(&cpfl_txq->base);
> > if (ret == CPFL_SCALAR_PATH)
> > return CPFL_SCALAR_PATH;
> > --
> > 2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH v3 05/10] net/cpfl: support hairpin queue setup and release
2023-05-25 3:58 ` Wu, Jingjing
@ 2023-05-26 3:52 ` Xing, Beilei
0 siblings, 0 replies; 164+ messages in thread
From: Xing, Beilei @ 2023-05-26 3:52 UTC (permalink / raw)
To: Wu, Jingjing; +Cc: dev, Liu, Mingxia, Wang, Xiao W
> -----Original Message-----
> From: Wu, Jingjing <jingjing.wu@intel.com>
> Sent: Thursday, May 25, 2023 11:59 AM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Wang, Xiao W
> <xiao.w.wang@intel.com>
> Subject: RE: [PATCH v3 05/10] net/cpfl: support hairpin queue setup and
> release
>
> >
> > +static int
> > +cpfl_rx_hairpin_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue
> *bufq,
> > + uint16_t logic_qid, uint16_t nb_desc) {
> > + struct cpfl_vport *cpfl_vport =
> > + (struct cpfl_vport *)dev->data->dev_private;
> > + struct idpf_vport *vport = &cpfl_vport->base;
> > + struct idpf_adapter *adapter = vport->adapter;
> > + struct rte_mempool *mp;
> > + char pool_name[RTE_MEMPOOL_NAMESIZE];
> > +
> > + mp = cpfl_vport->p2p_mp;
> > + if (!mp) {
> > + snprintf(pool_name, RTE_MEMPOOL_NAMESIZE,
> "p2p_mb_pool_%u",
> > + dev->data->port_id);
> > + mp = rte_pktmbuf_pool_create(pool_name,
> CPFL_P2P_NB_MBUF,
> > CPFL_P2P_CACHE_SIZE,
> > + 0, CPFL_P2P_MBUF_SIZE, dev-
> >device-
> > >numa_node);
> > + if (!mp) {
> > + PMD_INIT_LOG(ERR, "Failed to allocate mbuf pool for
> p2p");
> > + return -ENOMEM;
> > + }
> > + cpfl_vport->p2p_mp = mp;
> > + }
> > +
> > + bufq->mp = mp;
> > + bufq->nb_rx_desc = nb_desc;
> > + bufq->queue_id = cpfl_hw_qid_get(cpfl_vport-
> > >p2p_q_chunks_info.rx_buf_start_qid, logic_qid);
> > + bufq->port_id = dev->data->port_id;
> > + bufq->adapter = adapter;
> > + bufq->rx_buf_len = CPFL_P2P_MBUF_SIZE -
> RTE_PKTMBUF_HEADROOM;
> > +
> > + bufq->sw_ring = rte_zmalloc("sw ring",
> > + sizeof(struct rte_mbuf *) * nb_desc,
> > + RTE_CACHE_LINE_SIZE);
>
> Is sw_ring required in p2p case? It has been never used right?
> Please also check the sw_ring in tx queue.
Yes, it should be removed.
>
> > + if (!bufq->sw_ring) {
> > + PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
> > + return -ENOMEM;
> > + }
> > +
> > + bufq->q_set = true;
> > + bufq->ops = &def_rxq_ops;
> > +
> > + return 0;
> > +}
> > +
> > +int
> > +cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> > + uint16_t nb_desc,
> > + const struct rte_eth_hairpin_conf *conf) {
> > + struct cpfl_vport *cpfl_vport = (struct cpfl_vport *)dev->data-
> >dev_private;
> > + struct idpf_vport *vport = &cpfl_vport->base;
> > + struct idpf_adapter *adapter_base = vport->adapter;
> > + uint16_t logic_qid = cpfl_vport->nb_p2p_rxq;
> > + struct cpfl_rxq_hairpin_info *hairpin_info;
> > + struct cpfl_rx_queue *cpfl_rxq;
> > + struct idpf_rx_queue *bufq1 = NULL;
> > + struct idpf_rx_queue *rxq;
> > + uint16_t peer_port, peer_q;
> > + uint16_t qid;
> > + int ret;
> > +
> > + if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
> > + PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin
> queue.");
> > + return -EINVAL;
> > + }
> > +
> > + if (conf->peer_count != 1) {
> > + PMD_INIT_LOG(ERR, "Can't support Rx hairpin queue peer
> count %d",
> > conf->peer_count);
> > + return -EINVAL;
> > + }
> > +
> > + peer_port = conf->peers[0].port;
> > + peer_q = conf->peers[0].queue;
> > +
> > + if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
> > + nb_desc > CPFL_MAX_RING_DESC ||
> > + nb_desc < CPFL_MIN_RING_DESC) {
> > + PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is
> invalid",
> > nb_desc);
> > + return -EINVAL;
> > + }
> > +
> > + /* Free memory if needed */
> > + if (dev->data->rx_queues[queue_idx]) {
> > + cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
> > + dev->data->rx_queues[queue_idx] = NULL;
> > + }
> > +
> > + /* Setup Rx description queue */
> > + cpfl_rxq = rte_zmalloc_socket("cpfl hairpin rxq",
> > + sizeof(struct cpfl_rx_queue),
> > + RTE_CACHE_LINE_SIZE,
> > + SOCKET_ID_ANY);
> > + if (!cpfl_rxq) {
> > + PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue
> data
> > structure");
> > + return -ENOMEM;
> > + }
> > +
> > + rxq = &cpfl_rxq->base;
> > + hairpin_info = &cpfl_rxq->hairpin_info;
> > + rxq->nb_rx_desc = nb_desc * 2;
> > + rxq->queue_id =
> > +cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info.rx_start_qid,
> > logic_qid);
> > + rxq->port_id = dev->data->port_id;
> > + rxq->adapter = adapter_base;
> > + rxq->rx_buf_len = CPFL_P2P_MBUF_SIZE -
> RTE_PKTMBUF_HEADROOM;
> > + hairpin_info->hairpin_q = true;
> > + hairpin_info->peer_txp = peer_port;
> > + hairpin_info->peer_txq_id = peer_q;
> > +
> > + if (conf->manual_bind != 0)
> > + cpfl_vport->p2p_manual_bind = true;
> > + else
> > + cpfl_vport->p2p_manual_bind = false;
> > +
> > + /* setup 1 Rx buffer queue for the 1st hairpin rxq */
> > + if (logic_qid == 0) {
> > + bufq1 = rte_zmalloc_socket("hairpin rx bufq1",
> > + sizeof(struct idpf_rx_queue),
> > + RTE_CACHE_LINE_SIZE,
> > + SOCKET_ID_ANY);
> > + if (!bufq1) {
> > + PMD_INIT_LOG(ERR, "Failed to allocate memory for
> hairpin Rx
> > buffer queue 1.");
> > + ret = -ENOMEM;
> > + goto err_alloc_bufq1;
> > + }
> > + qid = 2 * logic_qid;
>
> Inside the brace ( if (logic_qid=0) {} ), the logic_qid should be zero, right? What
> is the purpose of doing qid = 2* logic_qid?
The if condition should be refined.
>
> > + ret = cpfl_rx_hairpin_bufq_setup(dev, bufq1, qid, nb_desc);
> > + if (ret) {
> > + PMD_INIT_LOG(ERR, "Failed to setup hairpin Rx buffer
> queue 1");
> > + ret = -EINVAL;
> > + goto err_setup_bufq1;
> > + }
> > + cpfl_vport->p2p_rx_bufq = bufq1;
> > + }
> > +
> > + rxq->bufq1 = cpfl_vport->p2p_rx_bufq;
> > + rxq->bufq2 = NULL;
> > +
>
> cpfl_vport->p2p_rx_bufq is allocated in this function. But haven't seen where it
> will be released.
It will be released in the cpfl_rx_queue_release function.
> And in cpfl_rx_hairpin_bufq_reset the rxq->bufq1 will be assigned to NULL.
> Will queue release miss this?
>
> > + cpfl_vport->nb_p2p_rxq++;
> > + rxq->q_set = true;
> > + dev->data->rx_queues[queue_idx] = cpfl_rxq;
> > +
> > + return 0;
> > +
> > +err_setup_bufq1:
> > + rte_free(bufq1);
> > +err_alloc_bufq1:
> > + rte_free(rxq);
> > +
> > + return ret;
> > +}
> > +
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v4 00/13] net/cpfl: add hairpin queue support
2023-05-19 7:31 ` [PATCH v3 00/10] net/cpfl: add hairpin queue support beilei.xing
` (9 preceding siblings ...)
2023-05-19 7:31 ` [PATCH v3 10/10] net/cpfl: support hairpin bind/unbind beilei.xing
@ 2023-05-26 7:38 ` beilei.xing
2023-05-26 7:38 ` [PATCH v4 01/13] net/cpfl: refine structures beilei.xing
` (13 more replies)
10 siblings, 14 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-26 7:38 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patchset adds hairpin queue support.
v2 changes:
- change hairpin rx queues configuration sequence.
- code refinement.
v3 changes:
- Refine the patchset based on the latest code.
v4 changes:
- Remove hairpin rx buffer queue's sw_ring.
- Change hairpin rx queues configuration sequence in cpfl_hairpin_bind function.
- Refine hairpin queue setup and release.
Beilei Xing (13):
net/cpfl: refine structures
common/idpf: support queue groups add/delete
net/cpfl: add haipin queue group during vport init
net/cpfl: support hairpin queue capbility get
net/cpfl: support hairpin queue setup and release
common/idpf: add queue config API
net/cpfl: support hairpin queue configuration
common/idpf: add switch queue API
net/cpfl: support hairpin queue start/stop
common/idpf: add irq map config API
net/cpfl: enable write back based on ITR expire
net/cpfl: support peer ports get
net/cpfl: support hairpin bind/unbind
drivers/common/idpf/idpf_common_device.c | 75 ++
drivers/common/idpf/idpf_common_device.h | 4 +
drivers/common/idpf/idpf_common_virtchnl.c | 138 +++-
drivers/common/idpf/idpf_common_virtchnl.h | 18 +
drivers/common/idpf/version.map | 6 +
drivers/net/cpfl/cpfl_ethdev.c | 605 ++++++++++++++--
drivers/net/cpfl/cpfl_ethdev.h | 35 +-
drivers/net/cpfl/cpfl_rxtx.c | 785 +++++++++++++++++++--
drivers/net/cpfl/cpfl_rxtx.h | 77 ++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 21 +-
10 files changed, 1645 insertions(+), 119 deletions(-)
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v4 01/13] net/cpfl: refine structures
2023-05-26 7:38 ` [PATCH v4 00/13] net/cpfl: add hairpin queue support beilei.xing
@ 2023-05-26 7:38 ` beilei.xing
2023-05-26 7:38 ` [PATCH v4 02/13] common/idpf: support queue groups add/delete beilei.xing
` (12 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-26 7:38 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch refines some structures to support hairpin queue,
cpfl_rx_queue/cpfl_tx_queue/cpfl_vport.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 85 +++++++-----
drivers/net/cpfl/cpfl_ethdev.h | 6 +-
drivers/net/cpfl/cpfl_rxtx.c | 175 +++++++++++++++++-------
drivers/net/cpfl/cpfl_rxtx.h | 8 ++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 17 +--
5 files changed, 196 insertions(+), 95 deletions(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 7528a14d05..e587155db6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -124,7 +124,8 @@ static int
cpfl_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct rte_eth_link new_link;
unsigned int i;
@@ -156,7 +157,8 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
static int
cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
dev_info->max_rx_queues = base->caps.max_rx_q;
@@ -216,7 +218,8 @@ cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
static int
cpfl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
/* mtu setting is forbidden if port is start */
if (dev->data->dev_started) {
@@ -256,12 +259,12 @@ static uint64_t
cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
{
uint64_t mbuf_alloc_failed = 0;
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i = 0;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- mbuf_alloc_failed += __atomic_load_n(&rxq->rx_stats.mbuf_alloc_failed,
+ cpfl_rxq = dev->data->rx_queues[i];
+ mbuf_alloc_failed += __atomic_load_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,
__ATOMIC_RELAXED);
}
@@ -271,8 +274,8 @@ cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
static int
cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
int ret;
@@ -305,20 +308,20 @@ cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
static void
cpfl_reset_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- __atomic_store_n(&rxq->rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
+ cpfl_rxq = dev->data->rx_queues[i];
+ __atomic_store_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
}
}
static int
cpfl_dev_stats_reset(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
int ret;
@@ -343,8 +346,8 @@ static int cpfl_dev_xstats_reset(struct rte_eth_dev *dev)
static int cpfl_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned int n)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
unsigned int i;
int ret;
@@ -459,7 +462,8 @@ cpfl_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t idx, shift;
int ret = 0;
@@ -498,7 +502,8 @@ cpfl_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t idx, shift;
int ret = 0;
@@ -536,7 +541,8 @@ static int
cpfl_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
int ret = 0;
@@ -601,7 +607,8 @@ static int
cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
int ret = 0;
@@ -638,7 +645,8 @@ cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,
static int
cpfl_dev_configure(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct rte_eth_conf *conf = &dev->data->dev_conf;
struct idpf_adapter *base = vport->adapter;
int ret;
@@ -710,7 +718,8 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
static int
cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
uint16_t nb_rx_queues = dev->data->nb_rx_queues;
return idpf_vport_irq_map_config(vport, nb_rx_queues);
@@ -719,14 +728,14 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
static int
cpfl_start_queues(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
- struct idpf_tx_queue *txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
int err = 0;
int i;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL || txq->tx_deferred_start)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
continue;
err = cpfl_tx_queue_start(dev, i);
if (err != 0) {
@@ -736,8 +745,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- if (rxq == NULL || rxq->rx_deferred_start)
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
continue;
err = cpfl_rx_queue_start(dev, i);
if (err != 0) {
@@ -752,7 +761,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)
static int
cpfl_dev_start(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(base);
uint16_t num_allocated_vectors = base->caps.num_allocated_vectors;
@@ -813,7 +823,8 @@ cpfl_dev_start(struct rte_eth_dev *dev)
static int
cpfl_dev_stop(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
if (dev->data->dev_started == 0)
return 0;
@@ -832,7 +843,8 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
static int
cpfl_dev_close(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
@@ -842,7 +854,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
adapter->cur_vport_nb--;
dev->data->dev_private = NULL;
adapter->vports[vport->sw_idx] = NULL;
- rte_free(vport);
+ rte_free(cpfl_vport);
return 0;
}
@@ -1047,7 +1059,7 @@ cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id)
int i;
for (i = 0; i < adapter->cur_vport_nb; i++) {
- vport = adapter->vports[i];
+ vport = &adapter->vports[i]->base;
if (vport->vport_id != vport_id)
continue;
else
@@ -1275,7 +1287,8 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter)
static int
cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_vport_param *param = init_params;
struct cpfl_adapter_ext *adapter = param->adapter;
/* for sending create vport virtchnl msg prepare */
@@ -1300,7 +1313,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
goto err;
}
- adapter->vports[param->idx] = vport;
+ adapter->vports[param->idx] = cpfl_vport;
adapter->cur_vports |= RTE_BIT32(param->devarg_id);
adapter->cur_vport_nb++;
@@ -1415,7 +1428,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
snprintf(name, sizeof(name), "cpfl_%s_vport_0",
pci_dev->device.name);
retval = rte_eth_dev_create(&pci_dev->device, name,
- sizeof(struct idpf_vport),
+ sizeof(struct cpfl_vport),
NULL, NULL, cpfl_dev_vport_init,
&vport_param);
if (retval != 0)
@@ -1433,7 +1446,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
pci_dev->device.name,
devargs.req_vports[i]);
retval = rte_eth_dev_create(&pci_dev->device, name,
- sizeof(struct idpf_vport),
+ sizeof(struct cpfl_vport),
NULL, NULL, cpfl_dev_vport_init,
&vport_param);
if (retval != 0)
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 200dfcac02..81fe9ac4c3 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -69,13 +69,17 @@ struct cpfl_devargs {
uint16_t req_vport_nb;
};
+struct cpfl_vport {
+ struct idpf_vport base;
+};
+
struct cpfl_adapter_ext {
TAILQ_ENTRY(cpfl_adapter_ext) next;
struct idpf_adapter base;
char name[CPFL_ADAPTER_NAME_LEN];
- struct idpf_vport **vports;
+ struct cpfl_vport **vports;
uint16_t max_vport_nb;
uint16_t cur_vports; /* bit mask of created vport */
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 75021c3c54..04a51b8d15 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -128,7 +128,8 @@ cpfl_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,
uint16_t nb_desc, unsigned int socket_id,
struct rte_mempool *mp, uint8_t bufq_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct idpf_hw *hw = &base->hw;
const struct rte_memzone *mz;
@@ -220,15 +221,69 @@ cpfl_rx_split_bufq_release(struct idpf_rx_queue *bufq)
rte_free(bufq);
}
+static void
+cpfl_rx_queue_release(void *rxq)
+{
+ struct cpfl_rx_queue *cpfl_rxq = rxq;
+ struct idpf_rx_queue *q = NULL;
+
+ if (cpfl_rxq == NULL)
+ return;
+
+ q = &cpfl_rxq->base;
+
+ /* Split queue */
+ if (!q->adapter->is_rx_singleq) {
+ if (q->bufq2)
+ cpfl_rx_split_bufq_release(q->bufq2);
+
+ if (q->bufq1)
+ cpfl_rx_split_bufq_release(q->bufq1);
+
+ rte_free(cpfl_rxq);
+ return;
+ }
+
+ /* Single queue */
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(cpfl_rxq);
+}
+
+static void
+cpfl_tx_queue_release(void *txq)
+{
+ struct cpfl_tx_queue *cpfl_txq = txq;
+ struct idpf_tx_queue *q = NULL;
+
+ if (cpfl_txq == NULL)
+ return;
+
+ q = &cpfl_txq->base;
+
+ if (q->complq) {
+ rte_memzone_free(q->complq->mz);
+ rte_free(q->complq);
+ }
+
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(cpfl_txq);
+}
+
int
cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct idpf_hw *hw = &base->hw;
+ struct cpfl_rx_queue *cpfl_rxq;
const struct rte_memzone *mz;
struct idpf_rx_queue *rxq;
uint16_t rx_free_thresh;
@@ -248,21 +303,23 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Free memory if needed */
if (dev->data->rx_queues[queue_idx] != NULL) {
- idpf_qc_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
dev->data->rx_queues[queue_idx] = NULL;
}
/* Setup Rx queue */
- rxq = rte_zmalloc_socket("cpfl rxq",
- sizeof(struct idpf_rx_queue),
+ cpfl_rxq = rte_zmalloc_socket("cpfl rxq",
+ sizeof(struct cpfl_rx_queue),
RTE_CACHE_LINE_SIZE,
socket_id);
- if (rxq == NULL) {
+ if (cpfl_rxq == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
ret = -ENOMEM;
goto err_rxq_alloc;
}
+ rxq = &cpfl_rxq->base;
+
is_splitq = !!(vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
rxq->mp = mp;
@@ -329,7 +386,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
rxq->q_set = true;
- dev->data->rx_queues[queue_idx] = rxq;
+ dev->data->rx_queues[queue_idx] = cpfl_rxq;
return 0;
@@ -349,7 +406,8 @@ cpfl_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq,
uint16_t queue_idx, uint16_t nb_desc,
unsigned int socket_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
const struct rte_memzone *mz;
struct idpf_tx_queue *cq;
int ret;
@@ -397,9 +455,11 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t tx_rs_thresh, tx_free_thresh;
+ struct cpfl_tx_queue *cpfl_txq;
struct idpf_hw *hw = &base->hw;
const struct rte_memzone *mz;
struct idpf_tx_queue *txq;
@@ -419,21 +479,23 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Free memory if needed. */
if (dev->data->tx_queues[queue_idx] != NULL) {
- idpf_qc_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
dev->data->tx_queues[queue_idx] = NULL;
}
/* Allocate the TX queue data structure. */
- txq = rte_zmalloc_socket("cpfl txq",
- sizeof(struct idpf_tx_queue),
+ cpfl_txq = rte_zmalloc_socket("cpfl txq",
+ sizeof(struct cpfl_tx_queue),
RTE_CACHE_LINE_SIZE,
socket_id);
- if (txq == NULL) {
+ if (cpfl_txq == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
ret = -ENOMEM;
goto err_txq_alloc;
}
+ txq = &cpfl_txq->base;
+
is_splitq = !!(vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
txq->nb_tx_desc = nb_desc;
@@ -487,7 +549,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
queue_idx * vport->chunks_info.tx_qtail_spacing);
txq->ops = &def_txq_ops;
txq->q_set = true;
- dev->data->tx_queues[queue_idx] = txq;
+ dev->data->tx_queues[queue_idx] = cpfl_txq;
return 0;
@@ -503,6 +565,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
+ struct cpfl_rx_queue *cpfl_rxq;
struct idpf_rx_queue *rxq;
uint16_t max_pkt_len;
uint32_t frame_size;
@@ -511,7 +574,8 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
if (rx_queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
- rxq = dev->data->rx_queues[rx_queue_id];
+ cpfl_rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = &cpfl_rxq->base;
if (rxq == NULL || !rxq->q_set) {
PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
@@ -575,9 +639,10 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_rx_queue *rxq =
- dev->data->rx_queues[rx_queue_id];
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq = dev->data->rx_queues[rx_queue_id];
+ struct idpf_rx_queue *rxq = &cpfl_rxq->base;
int err = 0;
err = idpf_vc_rxq_config(vport, rxq);
@@ -610,15 +675,15 @@ cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
if (tx_queue_id >= dev->data->nb_tx_queues)
return -EINVAL;
- txq = dev->data->tx_queues[tx_queue_id];
+ cpfl_txq = dev->data->tx_queues[tx_queue_id];
/* Init the RX tail register. */
- IDPF_PCI_REG_WRITE(txq->qtx_tail, 0);
+ IDPF_PCI_REG_WRITE(cpfl_txq->base.qtx_tail, 0);
return 0;
}
@@ -626,12 +691,13 @@ cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
int
cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_tx_queue *txq =
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_tx_queue *cpfl_txq =
dev->data->tx_queues[tx_queue_id];
int err = 0;
- err = idpf_vc_txq_config(vport, txq);
+ err = idpf_vc_txq_config(vport, &cpfl_txq->base);
if (err != 0) {
PMD_DRV_LOG(ERR, "Fail to configure Tx queue %u", tx_queue_id);
return err;
@@ -650,7 +716,7 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
tx_queue_id);
} else {
- txq->q_started = true;
+ cpfl_txq->base.q_started = true;
dev->data->tx_queue_state[tx_queue_id] =
RTE_ETH_QUEUE_STATE_STARTED;
}
@@ -661,13 +727,16 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
int
cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq;
struct idpf_rx_queue *rxq;
int err;
if (rx_queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
+ cpfl_rxq = dev->data->rx_queues[rx_queue_id];
err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
@@ -675,7 +744,7 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return err;
}
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = &cpfl_rxq->base;
rxq->q_started = false;
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
rxq->ops->release_mbufs(rxq);
@@ -693,13 +762,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_tx_queue *cpfl_txq;
struct idpf_tx_queue *txq;
int err;
if (tx_queue_id >= dev->data->nb_tx_queues)
return -EINVAL;
+ cpfl_txq = dev->data->tx_queues[tx_queue_id];
+
err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
@@ -707,7 +780,7 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
return err;
}
- txq = dev->data->tx_queues[tx_queue_id];
+ txq = &cpfl_txq->base;
txq->q_started = false;
txq->ops->release_mbufs(txq);
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
@@ -724,25 +797,25 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
void
cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- idpf_qc_rx_queue_release(dev->data->rx_queues[qid]);
+ cpfl_rx_queue_release(dev->data->rx_queues[qid]);
}
void
cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- idpf_qc_tx_queue_release(dev->data->tx_queues[qid]);
+ cpfl_tx_queue_release(dev->data->tx_queues[qid]);
}
void
cpfl_stop_queues(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
- struct idpf_tx_queue *txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
int i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- if (rxq == NULL)
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq == NULL)
continue;
if (cpfl_rx_queue_stop(dev, i) != 0)
@@ -750,8 +823,8 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL)
continue;
if (cpfl_tx_queue_stop(dev, i) != 0)
@@ -762,9 +835,10 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
void
cpfl_set_rx_function(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
#ifdef RTE_ARCH_X86
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i;
if (cpfl_rx_vec_dev_check_default(dev) == CPFL_VECTOR_PATH &&
@@ -790,8 +864,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- (void)idpf_qc_splitq_rx_vec_setup(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ (void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
if (vport->rx_use_avx512) {
@@ -810,8 +884,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
} else {
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- (void)idpf_qc_singleq_rx_vec_setup(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ (void)idpf_qc_singleq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
if (vport->rx_use_avx512) {
@@ -860,10 +934,11 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
void
cpfl_set_tx_function(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
#ifdef RTE_ARCH_X86
#ifdef CC_AVX512_SUPPORT
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
int i;
#endif /* CC_AVX512_SUPPORT */
@@ -878,8 +953,8 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
vport->tx_use_avx512 = true;
if (vport->tx_use_avx512) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- idpf_qc_tx_vec_avx512_setup(txq);
+ cpfl_txq = dev->data->tx_queues[i];
+ idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
}
}
}
@@ -916,10 +991,10 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
#ifdef CC_AVX512_SUPPORT
if (vport->tx_use_avx512) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL)
continue;
- idpf_qc_tx_vec_avx512_setup(txq);
+ idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
}
PMD_DRV_LOG(NOTICE,
"Using Single AVX512 Vector Tx (port %d).",
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index fb267d38c8..bfb9ad97bd 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -23,6 +23,14 @@
#define CPFL_SUPPORT_CHAIN_NUM 5
+struct cpfl_rx_queue {
+ struct idpf_rx_queue base;
+};
+
+struct cpfl_tx_queue {
+ struct idpf_tx_queue base;
+};
+
int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
index 665418d27d..5690b17911 100644
--- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
+++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
@@ -76,15 +76,16 @@ cpfl_rx_splitq_vec_default(struct idpf_rx_queue *rxq)
static inline int
cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_rx_queue *rxq;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq;
int i, default_ret, splitq_ret, ret = CPFL_SCALAR_PATH;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- default_ret = cpfl_rx_vec_queue_default(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
- splitq_ret = cpfl_rx_splitq_vec_default(rxq);
+ splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base);
ret = splitq_ret && default_ret;
} else {
ret = default_ret;
@@ -100,12 +101,12 @@ static inline int
cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)
{
int i;
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
int ret = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- ret = cpfl_tx_vec_queue_default(txq);
+ cpfl_txq = dev->data->tx_queues[i];
+ ret = cpfl_tx_vec_queue_default(&cpfl_txq->base);
if (ret == CPFL_SCALAR_PATH)
return CPFL_SCALAR_PATH;
}
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v4 02/13] common/idpf: support queue groups add/delete
2023-05-26 7:38 ` [PATCH v4 00/13] net/cpfl: add hairpin queue support beilei.xing
2023-05-26 7:38 ` [PATCH v4 01/13] net/cpfl: refine structures beilei.xing
@ 2023-05-26 7:38 ` beilei.xing
2023-05-26 7:38 ` [PATCH v4 03/13] net/cpfl: add hairpin queue group during vport init beilei.xing
` (11 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-26 7:38 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds queue group add/delete virtual channel support.
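For illustration, a hedged sketch of how a caller can drive the two new APIs (not
part of this patch; the group id and queue counts are made up, and the cpfl driver
fills a complete P2P group request in patch 03/13):

#include <string.h>
#include <idpf_common_virtchnl.h>   /* idpf_vc_queue_grps_add/del prototypes */

/* Add one queue group, then delete it again (illustrative values only). */
static int
example_queue_group_add_del(struct idpf_vport *vport)
{
	struct virtchnl2_add_queue_groups grps;
	struct virtchnl2_queue_group_id qg_id;
	uint8_t out_buf[IDPF_DFLT_MBX_BUF_SIZE];
	int ret;

	memset(&grps, 0, sizeof(grps));
	grps.vport_id = vport->vport_id;
	grps.qg_info.num_queue_groups = 1;
	grps.qg_info.groups[0].num_rx_q = 1;
	grps.qg_info.groups[0].num_tx_q = 1;
	grps.qg_info.groups[0].qg_id.queue_group_id = 1;   /* illustrative id */

	ret = idpf_vc_queue_grps_add(vport, &grps, out_buf);
	if (ret != 0)
		return ret;

	/* ... consume the queue register chunks returned in out_buf ... */

	memset(&qg_id, 0, sizeof(qg_id));
	qg_id.queue_group_id = 1;
	return idpf_vc_queue_grps_del(vport, 1, &qg_id);
}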
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 66 ++++++++++++++++++++++
drivers/common/idpf/idpf_common_virtchnl.h | 9 +++
drivers/common/idpf/version.map | 2 +
3 files changed, 77 insertions(+)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index b713678634..a3fe55c897 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -359,6 +359,72 @@ idpf_vc_vport_destroy(struct idpf_vport *vport)
return err;
}
+int
+idpf_vc_queue_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
+ uint8_t *p2p_queue_grps_out)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_cmd_info args;
+ int size, qg_info_size;
+ int err = -1;
+
+ size = sizeof(*p2p_queue_grps_info) +
+ (p2p_queue_grps_info->qg_info.num_queue_groups - 1) *
+ sizeof(struct virtchnl2_queue_group_info);
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_ADD_QUEUE_GROUPS;
+ args.in_args = (uint8_t *)p2p_queue_grps_info;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ if (err != 0) {
+ DRV_LOG(ERR,
+ "Failed to execute command of VIRTCHNL2_OP_ADD_QUEUE_GROUPS");
+ return err;
+ }
+
+ rte_memcpy(p2p_queue_grps_out, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
+ return 0;
+}
+
+int idpf_vc_queue_grps_del(struct idpf_vport *vport,
+ uint16_t num_q_grps,
+ struct virtchnl2_queue_group_id *qg_ids)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_delete_queue_groups *vc_del_q_grps;
+ struct idpf_cmd_info args;
+ int size;
+ int err;
+
+ size = sizeof(*vc_del_q_grps) +
+ (num_q_grps - 1) * sizeof(struct virtchnl2_queue_group_id);
+ vc_del_q_grps = rte_zmalloc("vc_del_q_grps", size, 0);
+
+ vc_del_q_grps->vport_id = vport->vport_id;
+ vc_del_q_grps->num_queue_groups = num_q_grps;
+ memcpy(vc_del_q_grps->qg_ids, qg_ids,
+ num_q_grps * sizeof(struct virtchnl2_queue_group_id));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_DEL_QUEUE_GROUPS;
+ args.in_args = (uint8_t *)vc_del_q_grps;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_DEL_QUEUE_GROUPS");
+
+ rte_free(vc_del_q_grps);
+ return err;
+}
+
int
idpf_vc_rss_key_set(struct idpf_vport *vport)
{
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index c45295290e..58b16e1c5d 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -64,4 +64,13 @@ int idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
__rte_internal
int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 *buff_count, struct idpf_dma_mem **buffs);
+__rte_internal
+int idpf_vc_queue_grps_del(struct idpf_vport *vport,
+ uint16_t num_q_grps,
+ struct virtchnl2_queue_group_id *qg_ids);
+__rte_internal
+int
+idpf_vc_queue_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *ptp_queue_grps_info,
+ uint8_t *ptp_queue_grps_out);
#endif /* _IDPF_COMMON_VIRTCHNL_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 70334a1b03..01d18f3f3f 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -43,6 +43,8 @@ INTERNAL {
idpf_vc_irq_map_unmap_config;
idpf_vc_one_msg_read;
idpf_vc_ptype_info_query;
+ idpf_vc_queue_grps_add;
+ idpf_vc_queue_grps_del;
idpf_vc_queue_switch;
idpf_vc_queues_ena_dis;
idpf_vc_rss_hash_get;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v4 03/13] net/cpfl: add hairpin queue group during vport init
2023-05-26 7:38 ` [PATCH v4 00/13] net/cpfl: add hairpin queue support beilei.xing
2023-05-26 7:38 ` [PATCH v4 01/13] net/cpfl: refine structures beilei.xing
2023-05-26 7:38 ` [PATCH v4 02/13] common/idpf: support queue groups add/delete beilei.xing
@ 2023-05-26 7:38 ` beilei.xing
2023-05-26 7:38 ` [PATCH v4 04/13] net/cpfl: support hairpin queue capability get beilei.xing
` (10 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-26 7:38 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds a hairpin queue group during vport init.
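For illustration, the chunk info cached here is what later patches use to turn a
logical hairpin queue index into an absolute queue id and doorbell offset; a hedged
sketch of that translation follows (the example_* helpers mirror the
cpfl_hw_qid_get/cpfl_hw_qtail_get helpers added in patch 05/13 and are not part of
this patch):

/* Derive the absolute queue id and tail register offset of the n-th hairpin
 * Rx queue from the cached P2P chunk info (illustrative only).
 */
static inline uint32_t
example_p2p_rx_qid(const struct p2p_queue_chunks_info *info, uint16_t logic_qid)
{
	return info->rx_start_qid + logic_qid;
}

static inline uint64_t
example_p2p_rx_qtail(const struct p2p_queue_chunks_info *info, uint16_t logic_qid)
{
	return info->rx_qtail_start + (uint64_t)logic_qid * info->rx_qtail_spacing;
}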
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 133 +++++++++++++++++++++++++++++++++
drivers/net/cpfl/cpfl_ethdev.h | 18 +++++
drivers/net/cpfl/cpfl_rxtx.h | 7 ++
3 files changed, 158 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index e587155db6..c1273a7478 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -840,6 +840,20 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
return 0;
}
+static int
+cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
+{
+ struct virtchnl2_queue_group_id qg_ids[CPFL_P2P_NB_QUEUE_GRPS] = {0};
+ int ret = 0;
+
+ qg_ids[0].queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
+ qg_ids[0].queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
+ ret = idpf_vc_queue_grps_del(vport, CPFL_P2P_NB_QUEUE_GRPS, qg_ids);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to delete p2p queue groups");
+ return ret;
+}
+
static int
cpfl_dev_close(struct rte_eth_dev *dev)
{
@@ -848,7 +862,12 @@ cpfl_dev_close(struct rte_eth_dev *dev)
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
+
+ if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
+ cpfl_p2p_queue_grps_del(vport);
+
idpf_vport_deinit(vport);
+ rte_free(cpfl_vport->p2p_q_chunks_info);
adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
adapter->cur_vport_nb--;
@@ -1284,6 +1303,96 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter)
return vport_idx;
}
+static int
+cpfl_p2p_q_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
+ uint8_t *p2p_q_vc_out_info)
+{
+ int ret;
+
+ p2p_queue_grps_info->vport_id = vport->vport_id;
+ p2p_queue_grps_info->qg_info.num_queue_groups = CPFL_P2P_NB_QUEUE_GRPS;
+ p2p_queue_grps_info->qg_info.groups[0].num_rx_q = CPFL_MAX_P2P_NB_QUEUES;
+ p2p_queue_grps_info->qg_info.groups[0].num_rx_bufq = CPFL_P2P_NB_RX_BUFQ;
+ p2p_queue_grps_info->qg_info.groups[0].num_tx_q = CPFL_MAX_P2P_NB_QUEUES;
+ p2p_queue_grps_info->qg_info.groups[0].num_tx_complq = CPFL_P2P_NB_TX_COMPLQ;
+ p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
+ p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
+ p2p_queue_grps_info->qg_info.groups[0].rx_q_grp_info.rss_lut_size = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.tx_tc = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.priority = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.is_sp = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.pir_weight = 0;
+
+ ret = idpf_vc_queue_grps_add(vport, p2p_queue_grps_info, p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to add p2p queue groups.");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
+ struct virtchnl2_add_queue_groups *p2p_q_vc_out_info)
+{
+ struct p2p_queue_chunks_info *p2p_q_chunks_info = cpfl_vport->p2p_q_chunks_info;
+ struct virtchnl2_queue_reg_chunks *vc_chunks_out;
+ int i, type;
+
+ if (p2p_q_vc_out_info->qg_info.groups[0].qg_id.queue_group_type !=
+ VIRTCHNL2_QUEUE_GROUP_P2P) {
+ PMD_DRV_LOG(ERR, "Add queue group response mismatch.");
+ return -EINVAL;
+ }
+
+ vc_chunks_out = &p2p_q_vc_out_info->qg_info.groups[0].chunks;
+
+ for (i = 0; i < vc_chunks_out->num_chunks; i++) {
+ type = vc_chunks_out->chunks[i].type;
+ switch (type) {
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ p2p_q_chunks_info->tx_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->tx_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->tx_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ p2p_q_chunks_info->rx_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->rx_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->rx_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ p2p_q_chunks_info->tx_compl_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->tx_compl_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->tx_compl_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ p2p_q_chunks_info->rx_buf_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->rx_buf_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->rx_buf_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported queue type");
+ break;
+ }
+ }
+
+ return 0;
+}
+
static int
cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
{
@@ -1293,6 +1402,8 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
struct cpfl_adapter_ext *adapter = param->adapter;
/* for sending create vport virtchnl msg prepare */
struct virtchnl2_create_vport create_vport_info;
+ struct virtchnl2_add_queue_groups p2p_queue_grps_info;
+ uint8_t p2p_q_vc_out_info[IDPF_DFLT_MBX_BUF_SIZE] = {0};
int ret = 0;
dev->dev_ops = &cpfl_eth_dev_ops;
@@ -1327,6 +1438,28 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
&dev->data->mac_addrs[0]);
+ if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
+ memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
+ ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to add p2p queue group.");
+ return 0;
+ }
+ cpfl_vport->p2p_q_chunks_info = rte_zmalloc(NULL,
+ sizeof(struct p2p_queue_chunks_info), 0);
+ if (cpfl_vport->p2p_q_chunks_info == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate p2p queue info.");
+ cpfl_p2p_queue_grps_del(vport);
+ return 0;
+ }
+ ret = cpfl_p2p_queue_info_init(cpfl_vport,
+ (struct virtchnl2_add_queue_groups *)p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to init p2p queue info.");
+ cpfl_p2p_queue_grps_del(vport);
+ }
+ }
+
return 0;
err_mac_addrs:
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 81fe9ac4c3..666d46a44a 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -56,6 +56,7 @@
/* Device IDs */
#define IDPF_DEV_ID_CPF 0x1453
+#define VIRTCHNL2_QUEUE_GROUP_P2P 0x100
struct cpfl_vport_param {
struct cpfl_adapter_ext *adapter;
@@ -69,8 +70,25 @@ struct cpfl_devargs {
uint16_t req_vport_nb;
};
+struct p2p_queue_chunks_info {
+ uint32_t tx_start_qid;
+ uint32_t rx_start_qid;
+ uint32_t tx_compl_start_qid;
+ uint32_t rx_buf_start_qid;
+
+ uint64_t tx_qtail_start;
+ uint32_t tx_qtail_spacing;
+ uint64_t rx_qtail_start;
+ uint32_t rx_qtail_spacing;
+ uint64_t tx_compl_qtail_start;
+ uint32_t tx_compl_qtail_spacing;
+ uint64_t rx_buf_qtail_start;
+ uint32_t rx_buf_qtail_spacing;
+};
+
struct cpfl_vport {
struct idpf_vport base;
+ struct p2p_queue_chunks_info *p2p_q_chunks_info;
};
struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index bfb9ad97bd..1fe65778f0 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -13,6 +13,13 @@
#define CPFL_MIN_RING_DESC 32
#define CPFL_MAX_RING_DESC 4096
#define CPFL_DMA_MEM_ALIGN 4096
+
+#define CPFL_MAX_P2P_NB_QUEUES 16
+#define CPFL_P2P_NB_RX_BUFQ 1
+#define CPFL_P2P_NB_TX_COMPLQ 1
+#define CPFL_P2P_NB_QUEUE_GRPS 1
+#define CPFL_P2P_QUEUE_GRP_ID 1
+
/* Base address of the HW descriptor ring should be 128B aligned. */
#define CPFL_RING_BASE_ALIGN 128
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v4 04/13] net/cpfl: support hairpin queue capability get
2023-05-26 7:38 ` [PATCH v4 00/13] net/cpfl: add hairpin queue support beilei.xing
` (2 preceding siblings ...)
2023-05-26 7:38 ` [PATCH v4 03/13] net/cpfl: add hairpin queue group during vport init beilei.xing
@ 2023-05-26 7:38 ` beilei.xing
2023-05-26 7:38 ` [PATCH v4 05/13] net/cpfl: support hairpin queue setup and release beilei.xing
` (9 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-26 7:38 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch adds hairpin_cap_get ops support.
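From the application side the new op is reached through the standard ethdev API; a
minimal usage sketch (error handling and the printf are illustrative):

#include <stdio.h>
#include <rte_ethdev.h>

/* Query the hairpin limits before setting up any hairpin queue. */
static int
example_query_hairpin_cap(uint16_t port_id)
{
	struct rte_eth_hairpin_cap cap;
	int ret;

	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
	if (ret != 0)
		return ret;   /* -ENOTSUP when the vport has no P2P queue group */

	printf("port %u: max hairpin queues %u, max descriptors %u\n",
	       port_id, cap.max_nb_queues, cap.max_nb_desc);
	return 0;
}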
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 18 ++++++++++++++++++
drivers/net/cpfl/cpfl_rxtx.h | 3 +++
2 files changed, 21 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index c1273a7478..40b4515539 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -154,6 +154,23 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
return rte_eth_linkstatus_set(dev, &new_link);
}
+static int
+cpfl_hairpin_cap_get(struct rte_eth_dev *dev,
+ struct rte_eth_hairpin_cap *cap)
+{
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+
+ if (cpfl_vport->p2p_q_chunks_info == NULL)
+ return -ENOTSUP;
+
+ cap->max_nb_queues = CPFL_MAX_P2P_NB_QUEUES;
+ cap->max_rx_2_tx = CPFL_MAX_HAIRPINQ_RX_2_TX;
+ cap->max_tx_2_rx = CPFL_MAX_HAIRPINQ_TX_2_RX;
+ cap->max_nb_desc = CPFL_MAX_HAIRPINQ_NB_DESC;
+
+ return 0;
+}
+
static int
cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
@@ -904,6 +921,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.xstats_get = cpfl_dev_xstats_get,
.xstats_get_names = cpfl_dev_xstats_get_names,
.xstats_reset = cpfl_dev_xstats_reset,
+ .hairpin_cap_get = cpfl_hairpin_cap_get,
};
static int
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 1fe65778f0..a4a164d462 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -14,6 +14,9 @@
#define CPFL_MAX_RING_DESC 4096
#define CPFL_DMA_MEM_ALIGN 4096
+#define CPFL_MAX_HAIRPINQ_RX_2_TX 1
+#define CPFL_MAX_HAIRPINQ_TX_2_RX 1
+#define CPFL_MAX_HAIRPINQ_NB_DESC 1024
#define CPFL_MAX_P2P_NB_QUEUES 16
#define CPFL_P2P_NB_RX_BUFQ 1
#define CPFL_P2P_NB_TX_COMPLQ 1
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v4 05/13] net/cpfl: support hairpin queue setup and release
2023-05-26 7:38 ` [PATCH v4 00/13] net/cpfl: add hairpin queue support beilei.xing
` (3 preceding siblings ...)
2023-05-26 7:38 ` [PATCH v4 04/13] net/cpfl: support hairpin queue capability get beilei.xing
@ 2023-05-26 7:38 ` beilei.xing
2023-05-30 2:27 ` Liu, Mingxia
2023-05-26 7:38 ` [PATCH v4 06/13] common/idpf: add queue config API beilei.xing
` (8 subsequent siblings)
13 siblings, 1 reply; 164+ messages in thread
From: beilei.xing @ 2023-05-26 7:38 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
Support hairpin Rx/Tx queue setup and release.
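At the ethdev level these ops back rte_eth_rx_hairpin_queue_setup() and
rte_eth_tx_hairpin_queue_setup(); a hedged application-side sketch binding one
Rx/Tx hairpin pair on a single port (queue indexes and the 1024 descriptor count
are illustrative, the latter matching the CPFL_MAX_HAIRPINQ_NB_DESC limit from the
previous patch):

#include <string.h>
#include <rte_ethdev.h>

/* Set up one hairpin Rx/Tx pair on the same port; hairpin queues are set up
 * after the regular data queues, so their indexes start at nb_rxq / nb_txq.
 */
static int
example_hairpin_pair_setup(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_hairpin_conf conf;
	int ret;

	memset(&conf, 0, sizeof(conf));
	conf.peer_count = 1;
	conf.peers[0].port = port_id;

	conf.peers[0].queue = nb_txq;          /* peer is the Tx hairpin queue */
	ret = rte_eth_rx_hairpin_queue_setup(port_id, nb_rxq, 1024, &conf);
	if (ret != 0)
		return ret;

	conf.peers[0].queue = nb_rxq;          /* peer is the Rx hairpin queue */
	return rte_eth_tx_hairpin_queue_setup(port_id, nb_txq, 1024, &conf);
}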
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 6 +
drivers/net/cpfl/cpfl_ethdev.h | 11 +
drivers/net/cpfl/cpfl_rxtx.c | 353 +++++++++++++++++++++++-
drivers/net/cpfl/cpfl_rxtx.h | 36 +++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 4 +
5 files changed, 409 insertions(+), 1 deletion(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 40b4515539..b17c538ec2 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -879,6 +879,10 @@ cpfl_dev_close(struct rte_eth_dev *dev)
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
+ if (cpfl_vport->p2p_mp) {
+ rte_mempool_free(cpfl_vport->p2p_mp);
+ cpfl_vport->p2p_mp = NULL;
+ }
if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
cpfl_p2p_queue_grps_del(vport);
@@ -922,6 +926,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.xstats_get_names = cpfl_dev_xstats_get_names,
.xstats_reset = cpfl_dev_xstats_reset,
.hairpin_cap_get = cpfl_hairpin_cap_get,
+ .rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
+ .tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
};
static int
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 666d46a44a..2e42354f70 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -89,6 +89,17 @@ struct p2p_queue_chunks_info {
struct cpfl_vport {
struct idpf_vport base;
struct p2p_queue_chunks_info *p2p_q_chunks_info;
+
+ struct rte_mempool *p2p_mp;
+
+ uint16_t nb_data_rxq;
+ uint16_t nb_data_txq;
+ uint16_t nb_p2p_rxq;
+ uint16_t nb_p2p_txq;
+
+ struct idpf_rx_queue *p2p_rx_bufq;
+ struct idpf_tx_queue *p2p_tx_complq;
+ bool p2p_manual_bind;
};
struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 04a51b8d15..9625629a20 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -10,6 +10,67 @@
#include "cpfl_rxtx.h"
#include "cpfl_rxtx_vec_common.h"
+static inline void
+cpfl_tx_hairpin_descq_reset(struct idpf_tx_queue *txq)
+{
+ uint32_t i, size;
+
+ if (!txq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+ return;
+ }
+
+ size = txq->nb_tx_desc * CPFL_P2P_DESC_LEN;
+ for (i = 0; i < size; i++)
+ ((volatile char *)txq->desc_ring)[i] = 0;
+}
+
+static inline void
+cpfl_tx_hairpin_complq_reset(struct idpf_tx_queue *cq)
+{
+ uint32_t i, size;
+
+ if (!cq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to complq is NULL");
+ return;
+ }
+
+ size = cq->nb_tx_desc * CPFL_P2P_DESC_LEN;
+ for (i = 0; i < size; i++)
+ ((volatile char *)cq->compl_ring)[i] = 0;
+}
+
+static inline void
+cpfl_rx_hairpin_descq_reset(struct idpf_rx_queue *rxq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxq)
+ return;
+
+ len = rxq->nb_rx_desc;
+ for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
+ ((volatile char *)rxq->rx_ring)[i] = 0;
+}
+
+static inline void
+cpfl_rx_hairpin_bufq_reset(struct idpf_rx_queue *rxbq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxbq)
+ return;
+
+ len = rxbq->nb_rx_desc;
+ for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
+ ((volatile char *)rxbq->rx_ring)[i] = 0;
+
+ rxbq->bufq1 = NULL;
+ rxbq->bufq2 = NULL;
+}
+
static uint64_t
cpfl_rx_offload_convert(uint64_t offload)
{
@@ -234,7 +295,10 @@ cpfl_rx_queue_release(void *rxq)
/* Split queue */
if (!q->adapter->is_rx_singleq) {
- if (q->bufq2)
+ /* the mz is shared between Tx/Rx hairpin, let Rx_release
+ * free the buf, q->bufq1->mz and q->mz.
+ */
+ if (!cpfl_rxq->hairpin_info.hairpin_q && q->bufq2)
cpfl_rx_split_bufq_release(q->bufq2);
if (q->bufq1)
@@ -385,6 +449,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
}
+ cpfl_vport->nb_data_rxq++;
rxq->q_set = true;
dev->data->rx_queues[queue_idx] = cpfl_rxq;
@@ -548,6 +613,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
queue_idx * vport->chunks_info.tx_qtail_spacing);
txq->ops = &def_txq_ops;
+ cpfl_vport->nb_data_txq++;
txq->q_set = true;
dev->data->tx_queues[queue_idx] = cpfl_txq;
@@ -562,6 +628,289 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return ret;
}
+static int
+cpfl_rx_hairpin_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,
+ uint16_t logic_qid, uint16_t nb_desc)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct rte_mempool *mp;
+ char pool_name[RTE_MEMPOOL_NAMESIZE];
+
+ mp = cpfl_vport->p2p_mp;
+ if (!mp) {
+ snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "p2p_mb_pool_%u",
+ dev->data->port_id);
+ mp = rte_pktmbuf_pool_create(pool_name, CPFL_P2P_NB_MBUF * CPFL_MAX_P2P_NB_QUEUES,
+ CPFL_P2P_CACHE_SIZE, 0, CPFL_P2P_MBUF_SIZE,
+ dev->device->numa_node);
+ if (!mp) {
+ PMD_INIT_LOG(ERR, "Failed to allocate mbuf pool for p2p");
+ return -ENOMEM;
+ }
+ cpfl_vport->p2p_mp = mp;
+ }
+
+ bufq->mp = mp;
+ bufq->nb_rx_desc = nb_desc;
+ bufq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_buf_start_qid,
+ logic_qid);
+ bufq->port_id = dev->data->port_id;
+ bufq->adapter = adapter;
+ bufq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
+
+ bufq->q_set = true;
+ bufq->ops = &def_rxq_ops;
+
+ return 0;
+}
+
+int
+cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf)
+{
+ struct cpfl_vport *cpfl_vport = (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter_base = vport->adapter;
+ uint16_t logic_qid = cpfl_vport->nb_p2p_rxq;
+ struct cpfl_rxq_hairpin_info *hairpin_info;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct idpf_rx_queue *bufq1 = NULL;
+ struct idpf_rx_queue *rxq;
+ uint16_t peer_port, peer_q;
+ uint16_t qid;
+ int ret;
+
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ PMD_INIT_LOG(ERR, "Only split queue model supports hairpin queue.");
+ return -EINVAL;
+ }
+
+ if (conf->peer_count != 1) {
+ PMD_INIT_LOG(ERR, "Can't support Rx hairpin queue peer count %d", conf->peer_count);
+ return -EINVAL;
+ }
+
+ peer_port = conf->peers[0].port;
+ peer_q = conf->peers[0].queue;
+
+ if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
+ nb_desc > CPFL_MAX_RING_DESC ||
+ nb_desc < CPFL_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is invalid", nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory if needed */
+ if (dev->data->rx_queues[queue_idx]) {
+ cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* Setup Rx description queue */
+ cpfl_rxq = rte_zmalloc_socket("cpfl hairpin rxq",
+ sizeof(struct cpfl_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!cpfl_rxq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
+ return -ENOMEM;
+ }
+
+ rxq = &cpfl_rxq->base;
+ hairpin_info = &cpfl_rxq->hairpin_info;
+ rxq->nb_rx_desc = nb_desc * 2;
+ rxq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid, logic_qid);
+ rxq->port_id = dev->data->port_id;
+ rxq->adapter = adapter_base;
+ rxq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
+ hairpin_info->hairpin_q = true;
+ hairpin_info->peer_txp = peer_port;
+ hairpin_info->peer_txq_id = peer_q;
+
+ if (conf->manual_bind != 0)
+ cpfl_vport->p2p_manual_bind = true;
+ else
+ cpfl_vport->p2p_manual_bind = false;
+
+ if (cpfl_vport->p2p_rx_bufq == NULL) {
+ bufq1 = rte_zmalloc_socket("hairpin rx bufq1",
+ sizeof(struct idpf_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!bufq1) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for hairpin Rx buffer queue 1.");
+ ret = -ENOMEM;
+ goto err_alloc_bufq1;
+ }
+ qid = 2 * logic_qid;
+ ret = cpfl_rx_hairpin_bufq_setup(dev, bufq1, qid, nb_desc);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to setup hairpin Rx buffer queue 1");
+ ret = -EINVAL;
+ goto err_setup_bufq1;
+ }
+ cpfl_vport->p2p_rx_bufq = bufq1;
+ }
+
+ rxq->bufq1 = cpfl_vport->p2p_rx_bufq;
+ rxq->bufq2 = NULL;
+
+ cpfl_vport->nb_p2p_rxq++;
+ rxq->q_set = true;
+ dev->data->rx_queues[queue_idx] = cpfl_rxq;
+
+ return 0;
+
+err_setup_bufq1:
+ rte_free(bufq1);
+err_alloc_bufq1:
+ rte_free(rxq);
+
+ return ret;
+}
+
+int
+cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter_base = vport->adapter;
+ uint16_t logic_qid = cpfl_vport->nb_p2p_txq;
+ struct cpfl_txq_hairpin_info *hairpin_info;
+ struct idpf_hw *hw = &adapter_base->hw;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct idpf_tx_queue *txq, *cq;
+ const struct rte_memzone *mz;
+ uint32_t ring_size;
+ uint16_t peer_port, peer_q;
+
+ if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ PMD_INIT_LOG(ERR, "Only split queue model supports hairpin queue.");
+ return -EINVAL;
+ }
+
+ if (conf->peer_count != 1) {
+ PMD_INIT_LOG(ERR, "Can't support Tx hairpin queue peer count %d", conf->peer_count);
+ return -EINVAL;
+ }
+
+ peer_port = conf->peers[0].port;
+ peer_q = conf->peers[0].queue;
+
+ if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
+ nb_desc > CPFL_MAX_RING_DESC ||
+ nb_desc < CPFL_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is invalid",
+ nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory if needed. */
+ if (dev->data->tx_queues[queue_idx]) {
+ cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the TX queue data structure. */
+ cpfl_txq = rte_zmalloc_socket("cpfl hairpin txq",
+ sizeof(struct cpfl_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!cpfl_txq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+ return -ENOMEM;
+ }
+
+ txq = &cpfl_txq->base;
+ hairpin_info = &cpfl_txq->hairpin_info;
+ /* Txq ring length should be 2 times the Tx completion queue size. */
+ txq->nb_tx_desc = nb_desc * 2;
+ txq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_start_qid, logic_qid);
+ txq->port_id = dev->data->port_id;
+ hairpin_info->hairpin_q = true;
+ hairpin_info->peer_rxp = peer_port;
+ hairpin_info->peer_rxq_id = peer_q;
+
+ if (conf->manual_bind != 0)
+ cpfl_vport->p2p_manual_bind = true;
+ else
+ cpfl_vport->p2p_manual_bind = false;
+
+ /* Tx hairpin queue always allocates the Tx HW ring */
+ ring_size = RTE_ALIGN(txq->nb_tx_desc * CPFL_P2P_DESC_LEN,
+ CPFL_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_ring", logic_qid,
+ ring_size + CPFL_P2P_RING_BUF,
+ CPFL_RING_BASE_ALIGN,
+ dev->device->numa_node);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
+ rte_free(txq);
+ return -ENOMEM;
+ }
+
+ txq->tx_ring_phys_addr = mz->iova;
+ txq->desc_ring = mz->addr;
+ txq->mz = mz;
+
+ cpfl_tx_hairpin_descq_reset(txq);
+ txq->qtx_tail = hw->hw_addr +
+ cpfl_hw_qtail_get(cpfl_vport->p2p_q_chunks_info->tx_qtail_start,
+ logic_qid, cpfl_vport->p2p_q_chunks_info->tx_qtail_spacing);
+ txq->ops = &def_txq_ops;
+
+ if (cpfl_vport->p2p_tx_complq == NULL) {
+ cq = rte_zmalloc_socket("cpfl hairpin cq",
+ sizeof(struct idpf_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ dev->device->numa_node);
+ if (!cq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+ return -ENOMEM;
+ }
+
+ cq->nb_tx_desc = nb_desc;
+ cq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_compl_start_qid,
+ 0);
+ cq->port_id = dev->data->port_id;
+
+ /* Tx completion queue always allocates the HW ring */
+ ring_size = RTE_ALIGN(cq->nb_tx_desc * CPFL_P2P_DESC_LEN,
+ CPFL_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_compl_ring", logic_qid,
+ ring_size + CPFL_P2P_RING_BUF,
+ CPFL_RING_BASE_ALIGN,
+ dev->device->numa_node);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX completion queue");
+ rte_free(txq);
+ return -ENOMEM;
+ }
+ cq->tx_ring_phys_addr = mz->iova;
+ cq->compl_ring = mz->addr;
+ cq->mz = mz;
+
+ cpfl_tx_hairpin_complq_reset(cq);
+ cpfl_vport->p2p_tx_complq = cq;
+ }
+
+ txq->complq = cpfl_vport->p2p_tx_complq;
+
+ cpfl_vport->nb_p2p_txq++;
+ txq->q_set = true;
+ dev->data->tx_queues[queue_idx] = cpfl_txq;
+
+ return 0;
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
@@ -865,6 +1214,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ continue;
(void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index a4a164d462..06198d4aad 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -22,6 +22,11 @@
#define CPFL_P2P_NB_TX_COMPLQ 1
#define CPFL_P2P_NB_QUEUE_GRPS 1
#define CPFL_P2P_QUEUE_GRP_ID 1
+#define CPFL_P2P_DESC_LEN 16
+#define CPFL_P2P_NB_MBUF 4096
+#define CPFL_P2P_CACHE_SIZE 250
+#define CPFL_P2P_MBUF_SIZE 2048
+#define CPFL_P2P_RING_BUF 128
/* Base address of the HW descriptor ring should be 128B aligned. */
#define CPFL_RING_BASE_ALIGN 128
@@ -33,14 +38,40 @@
#define CPFL_SUPPORT_CHAIN_NUM 5
+struct cpfl_rxq_hairpin_info {
+ bool hairpin_q; /* if rx queue is a hairpin queue */
+ uint16_t peer_txp;
+ uint16_t peer_txq_id;
+};
+
struct cpfl_rx_queue {
struct idpf_rx_queue base;
+ struct cpfl_rxq_hairpin_info hairpin_info;
+};
+
+struct cpfl_txq_hairpin_info {
+ bool hairpin_q; /* if tx queue is a hairpin queue */
+ uint16_t peer_rxp;
+ uint16_t peer_rxq_id;
};
struct cpfl_tx_queue {
struct idpf_tx_queue base;
+ struct cpfl_txq_hairpin_info hairpin_info;
};
+static inline uint16_t
+cpfl_hw_qid_get(uint16_t start_qid, uint16_t offset)
+{
+ return start_qid + offset;
+}
+
+static inline uint64_t
+cpfl_hw_qtail_get(uint64_t tail_start, uint16_t offset, uint64_t tail_spacing)
+{
+ return tail_start + offset * tail_spacing;
+}
+
int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
@@ -59,4 +90,9 @@ void cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void cpfl_set_rx_function(struct rte_eth_dev *dev);
void cpfl_set_tx_function(struct rte_eth_dev *dev);
+int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, const struct rte_eth_hairpin_conf *conf);
+int cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf);
#endif /* _CPFL_RXTX_H_ */
diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
index 5690b17911..d8e9191196 100644
--- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
+++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
@@ -85,6 +85,8 @@ cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)
cpfl_rxq = dev->data->rx_queues[i];
default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ continue;
splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base);
ret = splitq_ret && default_ret;
} else {
@@ -106,6 +108,8 @@ cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_tx_queues; i++) {
cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq->hairpin_info.hairpin_q)
+ continue;
ret = cpfl_tx_vec_queue_default(&cpfl_txq->base);
if (ret == CPFL_SCALAR_PATH)
return CPFL_SCALAR_PATH;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v4 06/13] common/idpf: add queue config API
2023-05-26 7:38 ` [PATCH v4 00/13] net/cpfl: add hairpin queue support beilei.xing
` (4 preceding siblings ...)
2023-05-26 7:38 ` [PATCH v4 05/13] net/cpfl: support hairpin queue setup and release beilei.xing
@ 2023-05-26 7:38 ` beilei.xing
2023-05-26 7:38 ` [PATCH v4 07/13] net/cpfl: support hairpin queue configuration beilei.xing
` (7 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-26 7:38 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch supports Rx/Tx queue configuration APIs.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 70 ++++++++++++++++++++++
drivers/common/idpf/idpf_common_virtchnl.h | 6 ++
drivers/common/idpf/version.map | 2 +
3 files changed, 78 insertions(+)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index a3fe55c897..211b44a88e 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -1050,6 +1050,41 @@ idpf_vc_rxq_config(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
return err;
}
+int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct virtchnl2_rxq_info *rxq_info,
+ uint16_t num_qs)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+ struct idpf_cmd_info args;
+ int size, err, i;
+
+ size = sizeof(*vc_rxqs) + (num_qs - 1) *
+ sizeof(struct virtchnl2_rxq_info);
+ vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+ if (vc_rxqs == NULL) {
+ DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_rxqs->vport_id = vport->vport_id;
+ vc_rxqs->num_qinfo = num_qs;
+ memcpy(vc_rxqs->qinfo, rxq_info, num_qs * sizeof(struct virtchnl2_rxq_info));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+ args.in_args = (uint8_t *)vc_rxqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ rte_free(vc_rxqs);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+ return err;
+}
+
int
idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
{
@@ -1121,6 +1156,41 @@ idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
return err;
}
+int
+idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
+ uint16_t num_qs)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+ struct idpf_cmd_info args;
+ int size, err;
+
+ size = sizeof(*vc_txqs) + (num_qs - 1) * sizeof(struct virtchnl2_txq_info);
+ vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+ if (vc_txqs == NULL) {
+ DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_txqs->vport_id = vport->vport_id;
+ vc_txqs->num_qinfo = num_qs;
+ memcpy(vc_txqs->qinfo, txq_info, num_qs * sizeof(struct virtchnl2_txq_info));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+ args.in_args = (uint8_t *)vc_txqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ rte_free(vc_txqs);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+ return err;
+}
+
int
idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
struct idpf_ctlq_msg *q_msg)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index 58b16e1c5d..db83761a5e 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -65,6 +65,12 @@ __rte_internal
int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 *buff_count, struct idpf_dma_mem **buffs);
__rte_internal
+int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct virtchnl2_rxq_info *rxq_info,
+ uint16_t num_qs);
+__rte_internal
+int idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
+ uint16_t num_qs);
+__rte_internal
int idpf_vc_queue_grps_del(struct idpf_vport *vport,
uint16_t num_q_grps,
struct virtchnl2_queue_group_id *qg_ids);
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 01d18f3f3f..17e77884ce 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -54,8 +54,10 @@ INTERNAL {
idpf_vc_rss_lut_get;
idpf_vc_rss_lut_set;
idpf_vc_rxq_config;
+ idpf_vc_rxq_config_by_info;
idpf_vc_stats_query;
idpf_vc_txq_config;
+ idpf_vc_txq_config_by_info;
idpf_vc_vectors_alloc;
idpf_vc_vectors_dealloc;
idpf_vc_vport_create;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
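A minimal usage sketch of the new *_by_info helpers (illustrative only: queue ids, ring length and DMA address below are placeholders, and the idpf common headers are assumed to be included). The point of the API is that several queues can be configured in one virtchnl transaction:

/* Sketch: configure two Rx queues with a single mailbox message. */
static int
example_config_two_rxqs(struct idpf_vport *vport)
{
	struct virtchnl2_rxq_info rxq_info[2] = {0};
	int i;

	for (i = 0; i < 2; i++) {
		rxq_info[i].type = VIRTCHNL2_QUEUE_TYPE_RX;
		rxq_info[i].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
		rxq_info[i].queue_id = 128 + i;		/* placeholder ids */
		rxq_info[i].ring_len = 1024;		/* placeholder ring length */
		rxq_info[i].dma_ring_addr = 0x100000 + i * 0x10000; /* placeholder IOVA */
		rxq_info[i].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
	}

	/* One VIRTCHNL2_OP_CONFIG_RX_QUEUES message covers both queues. */
	return idpf_vc_rxq_config_by_info(vport, rxq_info, 2);
}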
* [PATCH v4 07/13] net/cpfl: support hairpin queue configuration
2023-05-26 7:38 ` [PATCH v4 00/13] net/cpfl: add hairpin queue support beilei.xing
` (5 preceding siblings ...)
2023-05-26 7:38 ` [PATCH v4 06/13] common/idpf: add queue config API beilei.xing
@ 2023-05-26 7:38 ` beilei.xing
2023-05-26 7:38 ` [PATCH v4 08/13] common/idpf: add switch queue API beilei.xing
` (6 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-26 7:38 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports Rx/Tx hairpin queue configuration.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 136 +++++++++++++++++++++++++++++++--
drivers/net/cpfl/cpfl_rxtx.c | 80 +++++++++++++++++++
drivers/net/cpfl/cpfl_rxtx.h | 7 ++
3 files changed, 217 insertions(+), 6 deletions(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index b17c538ec2..a06def06d0 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -742,33 +742,157 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
return idpf_vport_irq_map_config(vport, nb_rx_queues);
}
+/* Update hairpin_info for dev's tx hairpin queue */
+static int
+cpfl_txq_hairpin_info_update(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+ struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+ struct cpfl_txq_hairpin_info *hairpin_info;
+ struct cpfl_tx_queue *cpfl_txq;
+ int i;
+
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ hairpin_info = &cpfl_txq->hairpin_info;
+ if (hairpin_info->peer_rxp != rx_port) {
+ PMD_DRV_LOG(ERR, "port %d is not the peer port", rx_port);
+ return -EINVAL;
+ }
+ hairpin_info->peer_rxq_id =
+ cpfl_hw_qid_get(cpfl_rx_vport->p2p_q_chunks_info->rx_start_qid,
+ hairpin_info->peer_rxq_id - cpfl_rx_vport->nb_data_rxq);
+ }
+
+ return 0;
+}
+
+/* Bind Rx hairpin queue's memory zone to peer Tx hairpin queue's memory zone */
+static void
+cpfl_rxq_hairpin_mz_bind(struct rte_eth_dev *dev)
+{
+ struct cpfl_vport *cpfl_rx_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_rx_vport->base;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_hw *hw = &adapter->hw;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct rte_eth_dev *peer_dev;
+ const struct rte_memzone *mz;
+ uint16_t peer_tx_port;
+ uint16_t peer_tx_qid;
+ int i;
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ peer_tx_port = cpfl_rxq->hairpin_info.peer_txp;
+ peer_tx_qid = cpfl_rxq->hairpin_info.peer_txq_id;
+ peer_dev = &rte_eth_devices[peer_tx_port];
+ cpfl_txq = peer_dev->data->tx_queues[peer_tx_qid];
+
+ /* bind rx queue */
+ mz = cpfl_txq->base.mz;
+ cpfl_rxq->base.rx_ring_phys_addr = mz->iova;
+ cpfl_rxq->base.rx_ring = mz->addr;
+ cpfl_rxq->base.mz = mz;
+
+ /* bind rx buffer queue */
+ mz = cpfl_txq->base.complq->mz;
+ cpfl_rxq->base.bufq1->rx_ring_phys_addr = mz->iova;
+ cpfl_rxq->base.bufq1->rx_ring = mz->addr;
+ cpfl_rxq->base.bufq1->mz = mz;
+ cpfl_rxq->base.bufq1->qrx_tail = hw->hw_addr +
+ cpfl_hw_qtail_get(cpfl_rx_vport->p2p_q_chunks_info->rx_buf_qtail_start,
+ 0, cpfl_rx_vport->p2p_q_chunks_info->rx_buf_qtail_spacing);
+ }
+}
+
static int
cpfl_start_queues(struct rte_eth_dev *dev)
{
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_rx_queue *cpfl_rxq;
struct cpfl_tx_queue *cpfl_txq;
+ int update_flag = 0;
int err = 0;
int i;
+ /* For normal data queues, configure, init and enable Txq.
+ * For non-manual bind hairpin queues, configure Txq.
+ */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
cpfl_txq = dev->data->tx_queues[i];
if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
continue;
- err = cpfl_tx_queue_start(dev, i);
+ if (!cpfl_txq->hairpin_info.hairpin_q) {
+ err = cpfl_tx_queue_start(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+ return err;
+ }
+ } else if (!cpfl_vport->p2p_manual_bind) {
+ if (update_flag == 0) {
+ err = cpfl_txq_hairpin_info_update(dev,
+ cpfl_txq->hairpin_info.peer_rxp);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info");
+ return err;
+ }
+ update_flag = 1;
+ }
+ err = cpfl_hairpin_txq_config(vport, cpfl_txq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+ return err;
+ }
+ }
+ }
+
+ /* For non-manual bind hairpin queues, configure Tx completion queue first.*/
+ if (!cpfl_vport->p2p_manual_bind && cpfl_vport->p2p_tx_complq != NULL) {
+ err = cpfl_hairpin_tx_complq_config(cpfl_vport);
if (err != 0) {
- PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+ PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
return err;
}
}
+ /* For non-manual bind hairpin queues, configure Rx buffer queue.*/
+ if (!cpfl_vport->p2p_manual_bind && cpfl_vport->p2p_rx_bufq != NULL) {
+ cpfl_rxq_hairpin_mz_bind(dev);
+ err = cpfl_hairpin_rx_bufq_config(cpfl_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
+ return err;
+ }
+ }
+
+ /* For normal data queues, configure, init and enable Rxq.
+ * For non-manual bind hairpin queues, configure Rxq, and then init Rxq.
+ */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
continue;
- err = cpfl_rx_queue_start(dev, i);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
- return err;
+ if (!cpfl_rxq->hairpin_info.hairpin_q) {
+ err = cpfl_rx_queue_start(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
+ return err;
+ }
+ } else if (!cpfl_vport->p2p_manual_bind) {
+ err = cpfl_hairpin_rxq_config(vport, cpfl_rxq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
+ return err;
+ }
+ err = cpfl_rx_queue_init(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+ return err;
+ }
}
}
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 9625629a20..702054d1c5 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -911,6 +911,86 @@ cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return 0;
}
+int
+cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport)
+{
+ struct idpf_rx_queue *rx_bufq = cpfl_vport->p2p_rx_bufq;
+ struct virtchnl2_rxq_info rxq_info[1] = {0};
+
+ rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ rxq_info[0].queue_id = rx_bufq->queue_id;
+ rxq_info[0].ring_len = rx_bufq->nb_rx_desc;
+ rxq_info[0].dma_ring_addr = rx_bufq->rx_ring_phys_addr;
+ rxq_info[0].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info[0].rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
+ rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ rxq_info[0].data_buffer_size = rx_bufq->rx_buf_len;
+ rxq_info[0].buffer_notif_stride = CPFL_RX_BUF_STRIDE;
+
+ return idpf_vc_rxq_config_by_info(&cpfl_vport->base, rxq_info, 1);
+}
+
+int
+cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq)
+{
+ struct virtchnl2_rxq_info rxq_info[1] = {0};
+ struct idpf_rx_queue *rxq = &cpfl_rxq->base;
+
+ rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX;
+ rxq_info[0].queue_id = rxq->queue_id;
+ rxq_info[0].ring_len = rxq->nb_rx_desc;
+ rxq_info[0].dma_ring_addr = rxq->rx_ring_phys_addr;
+ rxq_info[0].rx_bufq1_id = rxq->bufq1->queue_id;
+ rxq_info[0].max_pkt_size = vport->max_pkt_len;
+ rxq_info[0].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info[0].qflags |= VIRTCHNL2_RX_DESC_SIZE_16BYTE;
+
+ rxq_info[0].data_buffer_size = rxq->rx_buf_len;
+ rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ rxq_info[0].rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
+
+ PMD_DRV_LOG(NOTICE, "hairpin: vport %u, Rxq id 0x%x",
+ vport->vport_id, rxq_info[0].queue_id);
+
+ return idpf_vc_rxq_config_by_info(vport, rxq_info, 1);
+}
+
+int
+cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport)
+{
+ struct idpf_tx_queue *tx_complq = cpfl_vport->p2p_tx_complq;
+ struct virtchnl2_txq_info txq_info[1] = {0};
+
+ txq_info[0].dma_ring_addr = tx_complq->tx_ring_phys_addr;
+ txq_info[0].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ txq_info[0].queue_id = tx_complq->queue_id;
+ txq_info[0].ring_len = tx_complq->nb_tx_desc;
+ txq_info[0].peer_rx_queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+ txq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info[0].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+
+ return idpf_vc_txq_config_by_info(&cpfl_vport->base, txq_info, 1);
+}
+
+int
+cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq)
+{
+ struct idpf_tx_queue *txq = &cpfl_txq->base;
+ struct virtchnl2_txq_info txq_info[1] = {0};
+
+ txq_info[0].dma_ring_addr = txq->tx_ring_phys_addr;
+ txq_info[0].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ txq_info[0].queue_id = txq->queue_id;
+ txq_info[0].ring_len = txq->nb_tx_desc;
+ txq_info[0].tx_compl_queue_id = txq->complq->queue_id;
+ txq_info[0].relative_queue_id = txq->queue_id;
+ txq_info[0].peer_rx_queue_id = cpfl_txq->hairpin_info.peer_rxq_id;
+ txq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info[0].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+
+ return idpf_vc_txq_config_by_info(vport, txq_info, 1);
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 06198d4aad..872ebc1bfd 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -32,12 +32,15 @@
#define CPFL_RING_BASE_ALIGN 128
#define CPFL_DEFAULT_RX_FREE_THRESH 32
+#define CPFL_RXBUF_LOW_WATERMARK 64
#define CPFL_DEFAULT_TX_RS_THRESH 32
#define CPFL_DEFAULT_TX_FREE_THRESH 32
#define CPFL_SUPPORT_CHAIN_NUM 5
+#define CPFL_RX_BUF_STRIDE 64
+
struct cpfl_rxq_hairpin_info {
bool hairpin_q; /* if rx queue is a hairpin queue */
uint16_t peer_txp;
@@ -95,4 +98,8 @@ int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
int cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc,
const struct rte_eth_hairpin_conf *conf);
+int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport);
+int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq);
+int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
+int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
#endif /* _CPFL_RXTX_H_ */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
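To make the ordering in cpfl_start_queues() above easier to follow, here is the same non-manual-bind configuration flow condensed into one sketch (peer-info update, deferred-start checks and error logging trimmed; it assumes the same port owns both the hairpin Rx and Tx queues, as in cpfl_start_queues()):

static int
example_hairpin_config_order(struct rte_eth_dev *dev)
{
	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
	struct idpf_vport *vport = &cpfl_vport->base;
	int i, err;

	/* 1. configure the hairpin Tx descriptor queues */
	for (i = cpfl_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
		err = cpfl_hairpin_txq_config(vport, dev->data->tx_queues[i]);
		if (err != 0)
			return err;
	}
	/* 2. configure the shared Tx completion queue */
	err = cpfl_hairpin_tx_complq_config(cpfl_vport);
	if (err != 0)
		return err;
	/* 3. bind memzones to the Tx side, then configure the Rx buffer queue */
	cpfl_rxq_hairpin_mz_bind(dev);
	err = cpfl_hairpin_rx_bufq_config(cpfl_vport);
	if (err != 0)
		return err;
	/* 4. configure and init each hairpin Rx queue */
	for (i = cpfl_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++) {
		err = cpfl_hairpin_rxq_config(vport, dev->data->rx_queues[i]);
		if (err != 0)
			return err;
		err = cpfl_rx_queue_init(dev, i);
		if (err != 0)
			return err;
	}
	return 0;
}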
* [PATCH v4 08/13] common/idpf: add switch queue API
2023-05-26 7:38 ` [PATCH v4 00/13] net/cpfl: add hairpin queue support beilei.xing
` (6 preceding siblings ...)
2023-05-26 7:38 ` [PATCH v4 07/13] net/cpfl: support hairpin queue configuration beilei.xing
@ 2023-05-26 7:38 ` beilei.xing
2023-05-26 7:38 ` [PATCH v4 09/13] net/cpfl: support hairpin queue start/stop beilei.xing
` (5 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-26 7:38 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds the idpf_vc_ena_dis_one_queue API.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 2 +-
drivers/common/idpf/idpf_common_virtchnl.h | 3 +++
drivers/common/idpf/version.map | 1 +
3 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index 211b44a88e..6455f640da 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -733,7 +733,7 @@ idpf_vc_vectors_dealloc(struct idpf_vport *vport)
return err;
}
-static int
+int
idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
uint32_t type, bool on)
{
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index db83761a5e..9ff5c38c26 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -71,6 +71,9 @@ __rte_internal
int idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
uint16_t num_qs);
__rte_internal
+int idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
+ uint32_t type, bool on);
+__rte_internal
int idpf_vc_queue_grps_del(struct idpf_vport *vport,
uint16_t num_q_grps,
struct virtchnl2_queue_group_id *qg_ids);
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 17e77884ce..25624732b0 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -40,6 +40,7 @@ INTERNAL {
idpf_vc_cmd_execute;
idpf_vc_ctlq_post_rx_buffs;
idpf_vc_ctlq_recv;
+ idpf_vc_ena_dis_one_queue;
idpf_vc_irq_map_unmap_config;
idpf_vc_one_msg_read;
idpf_vc_ptype_info_query;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
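The newly exported helper toggles exactly one queue of a given virtchnl2 queue type; a minimal caller sketch (the queue id is a placeholder, and any of the RX/TX/RX_BUFFER/TX_COMPLETION types can be passed):

static int
example_disable_one_rx_queue(struct idpf_vport *vport)
{
	uint16_t qid = 256;			/* placeholder absolute HW queue id */
	uint32_t type = VIRTCHNL2_QUEUE_TYPE_RX;

	/* last argument: true = enable, false = disable */
	return idpf_vc_ena_dis_one_queue(vport, qid, type, false);
}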
* [PATCH v4 09/13] net/cpfl: support hairpin queue start/stop
2023-05-26 7:38 ` [PATCH v4 00/13] net/cpfl: add hairpin queue support beilei.xing
` (7 preceding siblings ...)
2023-05-26 7:38 ` [PATCH v4 08/13] common/idpf: add switch queue API beilei.xing
@ 2023-05-26 7:38 ` beilei.xing
2023-05-30 3:30 ` Liu, Mingxia
2023-05-26 7:38 ` [PATCH v4 10/13] common/idpf: add irq map config API beilei.xing
` (4 subsequent siblings)
13 siblings, 1 reply; 164+ messages in thread
From: beilei.xing @ 2023-05-26 7:38 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports Rx/Tx hairpin queue start/stop.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 41 +++++++++
drivers/net/cpfl/cpfl_rxtx.c | 151 +++++++++++++++++++++++++++++----
drivers/net/cpfl/cpfl_rxtx.h | 14 +++
3 files changed, 188 insertions(+), 18 deletions(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index a06def06d0..8035878602 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -896,6 +896,47 @@ cpfl_start_queues(struct rte_eth_dev *dev)
}
}
+ /* For non-manual bind hairpin queues, enable Tx queue and Rx queue,
+ * then enable Tx completion queue and Rx buffer queue.
+ */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) {
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ i - cpfl_vport->nb_data_txq,
+ false, true);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on",
+ i);
+ else
+ cpfl_txq->base.q_started = true;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) {
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ i - cpfl_vport->nb_data_rxq,
+ true, true);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on",
+ i);
+ else
+ cpfl_rxq->base.q_started = true;
+ }
+ }
+
+ if (!cpfl_vport->p2p_manual_bind &&
+ cpfl_vport->p2p_tx_complq != NULL &&
+ cpfl_vport->p2p_rx_bufq != NULL) {
+ err = cpfl_switch_hairpin_bufq_complq(cpfl_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq and Rx bufq");
+ return err;
+ }
+ }
+
return err;
}
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 702054d1c5..38c48ad8c7 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -991,6 +991,81 @@ cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq
return idpf_vc_txq_config_by_info(vport, txq_info, 1);
}
+int
+cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ queue_id = cpfl_vport->p2p_tx_complq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+ if (err)
+ return err;
+
+ type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+ return err;
+}
+
+int
+cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t logic_qid,
+ bool rx, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = rx ? VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;
+
+ if (type == VIRTCHNL2_QUEUE_TYPE_RX)
+ queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid, logic_qid);
+ else
+ queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_start_qid, logic_qid);
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+ if (err)
+ return err;
+
+ return err;
+}
+
+static int
+cpfl_alloc_split_p2p_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+ volatile struct virtchnl2_p2p_rx_buf_desc *rxd;
+ struct rte_mbuf *mbuf = NULL;
+ uint64_t dma_addr;
+ uint16_t i;
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ mbuf = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!mbuf)) {
+ PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+ return -ENOMEM;
+ }
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+ rxd = &((volatile struct virtchnl2_p2p_rx_buf_desc *)(rxq->rx_ring))[i];
+ rxd->reserve0 = 0;
+ rxd->pkt_addr = dma_addr;
+ }
+
+ rxq->nb_rx_hold = 0;
+ /* The value written in the RX buffer queue tail register must be a multiple of 8. */
+ rxq->rx_tail = rxq->nb_rx_desc - CPFL_HAIRPIN_Q_TAIL_AUX_VALUE;
+
+ return 0;
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
@@ -1044,22 +1119,31 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
IDPF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
} else {
/* Split queue */
- err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
- return err;
- }
- err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
- return err;
+ if (cpfl_rxq->hairpin_info.hairpin_q) {
+ err = cpfl_alloc_split_p2p_rxq_mbufs(rxq->bufq1);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate p2p RX buffer queue mbuf");
+ return err;
+ }
+ } else {
+ err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+ return err;
+ }
+ err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+ return err;
+ }
}
rte_wmb();
/* Init the RX tail register. */
IDPF_PCI_REG_WRITE(rxq->bufq1->qrx_tail, rxq->bufq1->rx_tail);
- IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
+ if (rxq->bufq2)
+ IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
}
return err;
@@ -1166,7 +1250,12 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return -EINVAL;
cpfl_rxq = dev->data->rx_queues[rx_queue_id];
- err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ rx_queue_id - cpfl_vport->nb_data_txq,
+ true, false);
+ else
+ err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
rx_queue_id);
@@ -1180,10 +1269,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
idpf_qc_single_rx_queue_reset(rxq);
} else {
rxq->bufq1->ops->release_mbufs(rxq->bufq1);
- rxq->bufq2->ops->release_mbufs(rxq->bufq2);
- idpf_qc_split_rx_queue_reset(rxq);
+ if (rxq->bufq2)
+ rxq->bufq2->ops->release_mbufs(rxq->bufq2);
+ if (cpfl_rxq->hairpin_info.hairpin_q) {
+ cpfl_rx_hairpin_descq_reset(rxq);
+ cpfl_rx_hairpin_bufq_reset(rxq->bufq1);
+ } else {
+ idpf_qc_split_rx_queue_reset(rxq);
+ }
}
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ if (!cpfl_rxq->hairpin_info.hairpin_q)
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1202,7 +1298,12 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
cpfl_txq = dev->data->tx_queues[tx_queue_id];
- err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
+ if (cpfl_txq->hairpin_info.hairpin_q)
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ tx_queue_id - cpfl_vport->nb_data_txq,
+ false, false);
+ else
+ err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
tx_queue_id);
@@ -1215,10 +1316,17 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
idpf_qc_single_tx_queue_reset(txq);
} else {
- idpf_qc_split_tx_descq_reset(txq);
- idpf_qc_split_tx_complq_reset(txq->complq);
+ if (cpfl_txq->hairpin_info.hairpin_q) {
+ cpfl_tx_hairpin_descq_reset(txq);
+ cpfl_tx_hairpin_complq_reset(txq->complq);
+ } else {
+ idpf_qc_split_tx_descq_reset(txq);
+ idpf_qc_split_tx_complq_reset(txq->complq);
+ }
}
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ if (!cpfl_txq->hairpin_info.hairpin_q)
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1238,10 +1346,17 @@ cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
void
cpfl_stop_queues(struct rte_eth_dev *dev)
{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
struct cpfl_rx_queue *cpfl_rxq;
struct cpfl_tx_queue *cpfl_txq;
int i;
+ if (cpfl_vport->p2p_rx_bufq != NULL) {
+ if (cpfl_switch_hairpin_bufq_complq(cpfl_vport, false) != 0)
+ PMD_DRV_LOG(ERR, "Failed to stop hairpin Tx complq and Rx bufq");
+ }
+
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
if (cpfl_rxq == NULL)
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 872ebc1bfd..42dfd07155 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -41,6 +41,17 @@
#define CPFL_RX_BUF_STRIDE 64
+/* The value written in the RX buffer queue tail register,
+ * and in WritePTR field in the TX completion queue context,
+ * must be a multiple of 8.
+ */
+#define CPFL_HAIRPIN_Q_TAIL_AUX_VALUE 8
+
+struct virtchnl2_p2p_rx_buf_desc {
+ __le64 reserve0;
+ __le64 pkt_addr; /* Packet buffer address */
+};
+
struct cpfl_rxq_hairpin_info {
bool hairpin_q; /* if rx queue is a hairpin queue */
uint16_t peer_txp;
@@ -102,4 +113,7 @@ int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq);
int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
+int cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t qid,
+ bool rx, bool on);
#endif /* _CPFL_RXTX_H_ */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
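For reference, the enable sequence applied above for non-manual-bind hairpin queues, condensed into one sketch (error logging and the q_started bookkeeping trimmed): hairpin Tx queues first, then hairpin Rx queues, and finally the shared Tx completion queue plus Rx buffer queue in a single call:

static int
example_hairpin_enable(struct rte_eth_dev *dev)
{
	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
	int i, err;

	for (i = cpfl_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
		/* logical hairpin index = ethdev queue index - number of data queues */
		err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
						     i - cpfl_vport->nb_data_txq,
						     false /* Tx */, true /* on */);
		if (err != 0)
			return err;
	}
	for (i = cpfl_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++) {
		err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
						     i - cpfl_vport->nb_data_rxq,
						     true /* Rx */, true /* on */);
		if (err != 0)
			return err;
	}
	return cpfl_switch_hairpin_bufq_complq(cpfl_vport, true);
}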
* [PATCH v4 10/13] common/idpf: add irq map config API
2023-05-26 7:38 ` [PATCH v4 00/13] net/cpfl: add hairpin queue support beilei.xing
` (8 preceding siblings ...)
2023-05-26 7:38 ` [PATCH v4 09/13] net/cpfl: support hairpin queue start/stop beilei.xing
@ 2023-05-26 7:38 ` beilei.xing
2023-05-26 7:38 ` [PATCH v4 11/13] net/cpfl: enable write back based on ITR expire beilei.xing
` (3 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-26 7:38 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch supports the idpf_vport_irq_map_config_by_qids API.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_device.c | 75 ++++++++++++++++++++++++
drivers/common/idpf/idpf_common_device.h | 4 ++
drivers/common/idpf/version.map | 1 +
3 files changed, 80 insertions(+)
diff --git a/drivers/common/idpf/idpf_common_device.c b/drivers/common/idpf/idpf_common_device.c
index dc47551b17..cc4207a46e 100644
--- a/drivers/common/idpf/idpf_common_device.c
+++ b/drivers/common/idpf/idpf_common_device.c
@@ -667,6 +667,81 @@ idpf_vport_irq_map_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
return ret;
}
+int
+idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport, uint32_t *qids, uint16_t nb_rx_queues)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_queue_vector *qv_map;
+ struct idpf_hw *hw = &adapter->hw;
+ uint32_t dynctl_val, itrn_val;
+ uint32_t dynctl_reg_start;
+ uint32_t itrn_reg_start;
+ uint16_t i;
+ int ret;
+
+ qv_map = rte_zmalloc("qv_map",
+ nb_rx_queues *
+ sizeof(struct virtchnl2_queue_vector), 0);
+ if (qv_map == NULL) {
+ DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
+ nb_rx_queues);
+ ret = -ENOMEM;
+ goto qv_map_alloc_err;
+ }
+
+ /* Rx interrupt disabled, Map interrupt only for writeback */
+
+ /* The capability flags adapter->caps.other_caps should be
+ * compared with bit VIRTCHNL2_CAP_WB_ON_ITR here. The if
+ * condition should be updated when the FW can return the
+ * correct flag bits.
+ */
+ dynctl_reg_start =
+ vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
+ itrn_reg_start =
+ vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
+ dynctl_val = IDPF_READ_REG(hw, dynctl_reg_start);
+ DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x", dynctl_val);
+ itrn_val = IDPF_READ_REG(hw, itrn_reg_start);
+ DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
+ /* Force write-backs by setting WB_ON_ITR bit in DYN_CTL
+ * register. WB_ON_ITR and INTENA are mutually exclusive
+ * bits. Setting WB_ON_ITR bits means TX and RX Descs
+ * are written back based on ITR expiration irrespective
+ * of INTENA setting.
+ */
+ /* TBD: need to tune INTERVAL value for better performance. */
+ itrn_val = (itrn_val == 0) ? IDPF_DFLT_INTERVAL : itrn_val;
+ dynctl_val = VIRTCHNL2_ITR_IDX_0 <<
+ PF_GLINT_DYN_CTL_ITR_INDX_S |
+ PF_GLINT_DYN_CTL_WB_ON_ITR_M |
+ itrn_val << PF_GLINT_DYN_CTL_INTERVAL_S;
+ IDPF_WRITE_REG(hw, dynctl_reg_start, dynctl_val);
+
+ for (i = 0; i < nb_rx_queues; i++) {
+ /* map all queues to the same vector */
+ qv_map[i].queue_id = qids[i];
+ qv_map[i].vector_id =
+ vport->recv_vectors->vchunks.vchunks->start_vector_id;
+ }
+ vport->qv_map = qv_map;
+
+ ret = idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, true);
+ if (ret != 0) {
+ DRV_LOG(ERR, "config interrupt mapping failed");
+ goto config_irq_map_err;
+ }
+
+ return 0;
+
+config_irq_map_err:
+ rte_free(vport->qv_map);
+ vport->qv_map = NULL;
+
+qv_map_alloc_err:
+ return ret;
+}
+
int
idpf_vport_irq_unmap_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
{
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 112367dae8..f767ea7cec 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -200,5 +200,9 @@ int idpf_vport_info_init(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_info);
__rte_internal
void idpf_vport_stats_update(struct virtchnl2_vport_stats *oes, struct virtchnl2_vport_stats *nes);
+__rte_internal
+int idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport,
+ uint32_t *qids,
+ uint16_t nb_rx_queues);
#endif /* _IDPF_COMMON_DEVICE_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 25624732b0..0729f6b912 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -69,6 +69,7 @@ INTERNAL {
idpf_vport_info_init;
idpf_vport_init;
idpf_vport_irq_map_config;
+ idpf_vport_irq_map_config_by_qids;
idpf_vport_irq_unmap_config;
idpf_vport_rss_config;
idpf_vport_stats_update;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
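A caller sketch for the new API: because the queue ids are now passed explicitly, the old contiguous mapping is just one possible input (the qids array size below is an arbitrary placeholder; cpfl uses the API to mix data and hairpin queue ids, see the next patch):

static int
example_irq_map_contiguous(struct idpf_vport *vport, uint16_t nb_rx_queues)
{
	uint32_t qids[32];	/* placeholder upper bound */
	uint16_t i;

	if (nb_rx_queues > RTE_DIM(qids))
		return -EINVAL;

	for (i = 0; i < nb_rx_queues; i++)
		qids[i] = vport->chunks_info.rx_start_qid + i;

	return idpf_vport_irq_map_config_by_qids(vport, qids, nb_rx_queues);
}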
* [PATCH v4 11/13] net/cpfl: enable write back based on ITR expire
2023-05-26 7:38 ` [PATCH v4 00/13] net/cpfl: add hairpin queue support beilei.xing
` (9 preceding siblings ...)
2023-05-26 7:38 ` [PATCH v4 10/13] common/idpf: add irq map config API beilei.xing
@ 2023-05-26 7:38 ` beilei.xing
2023-05-26 7:38 ` [PATCH v4 12/13] net/cpfl: support peer ports get beilei.xing
` (2 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-26 7:38 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch enables write back based on ITR expire
(WB_ON_ITR) for hairpin queues.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 8035878602..74f33e9814 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -735,11 +735,22 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
static int
cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
{
+ uint32_t qids[CPFL_MAX_P2P_NB_QUEUES + IDPF_DEFAULT_RXQ_NUM] = {0};
struct cpfl_vport *cpfl_vport = dev->data->dev_private;
struct idpf_vport *vport = &cpfl_vport->base;
uint16_t nb_rx_queues = dev->data->nb_rx_queues;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i;
- return idpf_vport_irq_map_config(vport, nb_rx_queues);
+ for (i = 0; i < nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ qids[i] = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid,
+ (i - cpfl_vport->nb_data_rxq));
+ else
+ qids[i] = cpfl_hw_qid_get(vport->chunks_info.rx_start_qid, i);
+ }
+ return idpf_vport_irq_map_config_by_qids(vport, qids, nb_rx_queues);
}
/* Update hairpin_info for dev's tx hairpin queue */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
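For context, the DYN_CTL value that actually enables write back on ITR expiry is assembled in idpf_vport_irq_map_config_by_qids() (previous patch); a sketch of that computation, factored out purely for illustration:

static uint32_t
example_wb_on_itr_dynctl(uint32_t itrn_val)
{
	/* fall back to the default interval when the register reads 0 */
	if (itrn_val == 0)
		itrn_val = IDPF_DFLT_INTERVAL;

	/* WB_ON_ITR set, INTENA left clear: descriptors are written back
	 * on ITR expiration regardless of interrupt enablement.
	 */
	return VIRTCHNL2_ITR_IDX_0 << PF_GLINT_DYN_CTL_ITR_INDX_S |
	       PF_GLINT_DYN_CTL_WB_ON_ITR_M |
	       itrn_val << PF_GLINT_DYN_CTL_INTERVAL_S;
}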
* [PATCH v4 12/13] net/cpfl: support peer ports get
2023-05-26 7:38 ` [PATCH v4 00/13] net/cpfl: add hairpin queue support beilei.xing
` (10 preceding siblings ...)
2023-05-26 7:38 ` [PATCH v4 11/13] net/cpfl: enable write back based on ITR expire beilei.xing
@ 2023-05-26 7:38 ` beilei.xing
2023-05-26 7:38 ` [PATCH v4 13/13] net/cpfl: support hairpin bind/unbind beilei.xing
2023-05-31 10:18 ` [PATCH v5 00/13] net/cpfl: add hairpin queue support beilei.xing
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-26 7:38 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports getting hairpin peer ports.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 40 ++++++++++++++++++++++++++++++++++
1 file changed, 40 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 74f33e9814..d6dc1672f1 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1075,6 +1075,45 @@ cpfl_dev_close(struct rte_eth_dev *dev)
return 0;
}
+static int
+cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
+ size_t len, uint32_t tx)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_tx_queue *txq;
+ struct idpf_rx_queue *rxq;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i, j;
+
+ if (len <= 0)
+ return -EINVAL;
+
+ if (cpfl_vport->p2p_q_chunks_info == NULL)
+ return -ENOTSUP;
+
+ if (tx > 0) {
+ for (i = cpfl_vport->nb_data_txq, j = 0; i < dev->data->nb_tx_queues; i++, j++) {
+ txq = dev->data->tx_queues[i];
+ if (txq == NULL)
+ return -EINVAL;
+ cpfl_txq = (struct cpfl_tx_queue *)txq;
+ peer_ports[j] = cpfl_txq->hairpin_info.peer_rxp;
+ }
+ } else if (tx == 0) {
+ for (i = cpfl_vport->nb_data_rxq, j = 0; i < dev->data->nb_rx_queues; i++, j++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq == NULL)
+ return -EINVAL;
+ cpfl_rxq = (struct cpfl_rx_queue *)rxq;
+ peer_ports[j] = cpfl_rxq->hairpin_info.peer_txp;
+ }
+ }
+
+ return j;
+}
+
static const struct eth_dev_ops cpfl_eth_dev_ops = {
.dev_configure = cpfl_dev_configure,
.dev_close = cpfl_dev_close,
@@ -1104,6 +1143,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.hairpin_cap_get = cpfl_hairpin_cap_get,
.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
+ .hairpin_get_peer_ports = cpfl_hairpin_get_peer_ports,
};
static int
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
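From the application side, this callback is reached through the generic ethdev API; a minimal sketch (port id handling and error reporting are up to the application):

#include <stdio.h>
#include <rte_ethdev.h>

static void
example_show_tx_peers(uint16_t tx_port)
{
	uint16_t peers[RTE_MAX_ETHPORTS];
	int n, i;

	/* direction != 0: tx_port is the Tx side, peers[] receives the Rx ports */
	n = rte_eth_hairpin_get_peer_ports(tx_port, peers, RTE_DIM(peers), 1);
	if (n < 0) {
		printf("hairpin_get_peer_ports failed: %d\n", n);
		return;
	}
	for (i = 0; i < n; i++)
		printf("Tx port %u hairpin peer Rx port %u\n", tx_port, peers[i]);
}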
* [PATCH v4 13/13] net/cpfl: support hairpin bind/unbind
2023-05-26 7:38 ` [PATCH v4 00/13] net/cpfl: add hairpin queue support beilei.xing
` (11 preceding siblings ...)
2023-05-26 7:38 ` [PATCH v4 12/13] net/cpfl: support peer ports get beilei.xing
@ 2023-05-26 7:38 ` beilei.xing
2023-05-30 3:59 ` Liu, Mingxia
2023-05-31 10:18 ` [PATCH v5 00/13] net/cpfl: add hairpin queue support beilei.xing
13 siblings, 1 reply; 164+ messages in thread
From: beilei.xing @ 2023-05-26 7:38 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports hairpin_bind/unbind ops.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 137 +++++++++++++++++++++++++++++++++
drivers/net/cpfl/cpfl_rxtx.c | 28 +++++++
drivers/net/cpfl/cpfl_rxtx.h | 2 +
3 files changed, 167 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index d6dc1672f1..4b70441e27 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1114,6 +1114,141 @@ cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
return j;
}
+static int
+cpfl_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct idpf_vport *tx_vport = &cpfl_tx_vport->base;
+ struct cpfl_vport *cpfl_rx_vport;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct rte_eth_dev *peer_dev;
+ struct idpf_vport *rx_vport;
+ int err = 0;
+ int i;
+
+ err = cpfl_txq_hairpin_info_update(dev, rx_port);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info.");
+ return err;
+ }
+
+ /* configure hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ err = cpfl_hairpin_txq_config(tx_vport, cpfl_txq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+ return err;
+ }
+ }
+
+ err = cpfl_hairpin_tx_complq_config(cpfl_tx_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
+ return err;
+ }
+
+ peer_dev = &rte_eth_devices[rx_port];
+ cpfl_rx_vport = (struct cpfl_vport *)peer_dev->data->dev_private;
+ rx_vport = &cpfl_rx_vport->base;
+ cpfl_rxq_hairpin_mz_bind(peer_dev);
+
+ err = cpfl_hairpin_rx_bufq_config(cpfl_rx_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
+ return err;
+ }
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ err = cpfl_hairpin_rxq_config(rx_vport, cpfl_rxq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
+ return err;
+ }
+ err = cpfl_rx_queue_init(peer_dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+ return err;
+ }
+ }
+
+ /* enable hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+ i - cpfl_tx_vport->nb_data_txq,
+ false, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on",
+ i);
+ return err;
+ }
+ cpfl_txq->base.q_started = true;
+ }
+
+ err = cpfl_switch_hairpin_complq(cpfl_tx_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq");
+ return err;
+ }
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+ i - cpfl_rx_vport->nb_data_rxq,
+ true, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on",
+ i);
+ }
+ cpfl_rxq->base.q_started = true;
+ }
+
+ err = cpfl_switch_hairpin_bufq(cpfl_rx_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Rx buffer queue");
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+cpfl_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+ struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i;
+
+ /* disable hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+ i - cpfl_tx_vport->nb_data_txq,
+ false, false);
+ cpfl_txq->base.q_started = false;
+ }
+
+ cpfl_switch_hairpin_complq(cpfl_tx_vport, false);
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+ i - cpfl_rx_vport->nb_data_rxq,
+ true, false);
+ cpfl_rxq->base.q_started = false;
+ }
+
+ cpfl_switch_hairpin_bufq(cpfl_rx_vport, false);
+
+ return 0;
+}
+
static const struct eth_dev_ops cpfl_eth_dev_ops = {
.dev_configure = cpfl_dev_configure,
.dev_close = cpfl_dev_close,
@@ -1144,6 +1279,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
.hairpin_get_peer_ports = cpfl_hairpin_get_peer_ports,
+ .hairpin_bind = cpfl_hairpin_bind,
+ .hairpin_unbind = cpfl_hairpin_unbind,
};
static int
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 38c48ad8c7..ef83a03c2b 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -1011,6 +1011,34 @@ cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on)
return err;
}
+int
+cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ queue_id = cpfl_vport->p2p_tx_complq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+ return err;
+}
+
+int
+cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+ return err;
+}
+
int
cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t logic_qid,
bool rx, bool on)
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 42dfd07155..86e97541c4 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -114,6 +114,8 @@ int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl
int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
int cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on);
int cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t qid,
bool rx, bool on);
#endif /* _CPFL_RXTX_H_ */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
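From the application side, the manual-bind flow looks roughly like the sketch below (port ids are placeholders): the hairpin queues are set up with manual_bind in rte_eth_hairpin_conf, both ports are started, and only then does rte_eth_hairpin_bind() trigger the configuration and enabling implemented above:

#include <rte_ethdev.h>

static int
example_manual_bind(uint16_t tx_port, uint16_t rx_port)
{
	int ret;

	ret = rte_eth_hairpin_bind(tx_port, rx_port);
	if (ret != 0)
		return ret;

	/* ... run traffic through the hairpin path ... */

	return rte_eth_hairpin_unbind(tx_port, rx_port);
}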
* RE: [PATCH v4 05/13] net/cpfl: support hairpin queue setup and release
2023-05-26 7:38 ` [PATCH v4 05/13] net/cpfl: support hairpin queue setup and release beilei.xing
@ 2023-05-30 2:27 ` Liu, Mingxia
2023-05-30 2:49 ` Liu, Mingxia
0 siblings, 1 reply; 164+ messages in thread
From: Liu, Mingxia @ 2023-05-30 2:27 UTC (permalink / raw)
To: Xing, Beilei, Wu, Jingjing; +Cc: dev, Wang, Xiao W
> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Friday, May 26, 2023 3:39 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>
> Subject: [PATCH v4 05/13] net/cpfl: support hairpin queue setup and release
>
> From: Beilei Xing <beilei.xing@intel.com>
>
> Support hairpin Rx/Tx queue setup and release.
>
> Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/net/cpfl/cpfl_ethdev.c | 6 +
> drivers/net/cpfl/cpfl_ethdev.h | 11 +
> drivers/net/cpfl/cpfl_rxtx.c | 353 +++++++++++++++++++++++-
> drivers/net/cpfl/cpfl_rxtx.h | 36 +++
> drivers/net/cpfl/cpfl_rxtx_vec_common.h | 4 +
> 5 files changed, 409 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c index
> 40b4515539..b17c538ec2 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.c
> +++ b/drivers/net/cpfl/cpfl_ethdev.c
> @@ -879,6 +879,10 @@ cpfl_dev_close(struct rte_eth_dev *dev)
> struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport-
> >adapter);
>
> cpfl_dev_stop(dev);
> + if (cpfl_vport->p2p_mp) {
> + rte_mempool_free(cpfl_vport->p2p_mp);
> + cpfl_vport->p2p_mp = NULL;
> + }
>
> if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
> cpfl_p2p_queue_grps_del(vport);
> @@ -922,6 +926,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
> .xstats_get_names = cpfl_dev_xstats_get_names,
> .xstats_reset = cpfl_dev_xstats_reset,
> .hairpin_cap_get = cpfl_hairpin_cap_get,
> + .rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
> + .tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
> };
>
> +int
> +cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> + uint16_t nb_desc,
> + const struct rte_eth_hairpin_conf *conf) {
> + struct cpfl_vport *cpfl_vport = (struct cpfl_vport *)dev->data-
> >dev_private;
> + struct idpf_vport *vport = &cpfl_vport->base;
> + struct idpf_adapter *adapter_base = vport->adapter;
> + uint16_t logic_qid = cpfl_vport->nb_p2p_rxq;
> + struct cpfl_rxq_hairpin_info *hairpin_info;
> + struct cpfl_rx_queue *cpfl_rxq;
> + struct idpf_rx_queue *bufq1 = NULL;
> + struct idpf_rx_queue *rxq;
> + uint16_t peer_port, peer_q;
> + uint16_t qid;
> + int ret;
> +
> + if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
> + PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin
> queue.");
> + return -EINVAL;
> + }
> +
> + if (conf->peer_count != 1) {
> + PMD_INIT_LOG(ERR, "Can't support Rx hairpin queue peer
> count %d", conf->peer_count);
> + return -EINVAL;
> + }
> +
> + peer_port = conf->peers[0].port;
> + peer_q = conf->peers[0].queue;
> +
> + if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
> + nb_desc > CPFL_MAX_RING_DESC ||
> + nb_desc < CPFL_MIN_RING_DESC) {
> + PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is
> invalid", nb_desc);
> + return -EINVAL;
> + }
> +
> + /* Free memory if needed */
> + if (dev->data->rx_queues[queue_idx]) {
> + cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
> + dev->data->rx_queues[queue_idx] = NULL;
> + }
> +
> + /* Setup Rx description queue */
> + cpfl_rxq = rte_zmalloc_socket("cpfl hairpin rxq",
> + sizeof(struct cpfl_rx_queue),
> + RTE_CACHE_LINE_SIZE,
> + SOCKET_ID_ANY);
> + if (!cpfl_rxq) {
> + PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue
> data structure");
> + return -ENOMEM;
> + }
> +
> + rxq = &cpfl_rxq->base;
> + hairpin_info = &cpfl_rxq->hairpin_info;
> + rxq->nb_rx_desc = nb_desc * 2;
> + rxq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info-
> >rx_start_qid, logic_qid);
> + rxq->port_id = dev->data->port_id;
> + rxq->adapter = adapter_base;
> + rxq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
> + hairpin_info->hairpin_q = true;
> + hairpin_info->peer_txp = peer_port;
> + hairpin_info->peer_txq_id = peer_q;
> +
> + if (conf->manual_bind != 0)
> + cpfl_vport->p2p_manual_bind = true;
> + else
> + cpfl_vport->p2p_manual_bind = false;
> +
> + if (cpfl_vport->p2p_rx_bufq == NULL) {
> + bufq1 = rte_zmalloc_socket("hairpin rx bufq1",
> + sizeof(struct idpf_rx_queue),
> + RTE_CACHE_LINE_SIZE,
> + SOCKET_ID_ANY);
> + if (!bufq1) {
> + PMD_INIT_LOG(ERR, "Failed to allocate memory for
> hairpin Rx buffer queue 1.");
> + ret = -ENOMEM;
> + goto err_alloc_bufq1;
> + }
> + qid = 2 * logic_qid;
> + ret = cpfl_rx_hairpin_bufq_setup(dev, bufq1, qid, nb_desc);
> + if (ret) {
> + PMD_INIT_LOG(ERR, "Failed to setup hairpin Rx buffer
> queue 1");
> + ret = -EINVAL;
> + goto err_setup_bufq1;
> + }
> + cpfl_vport->p2p_rx_bufq = bufq1;
> + }
> +
> + rxq->bufq1 = cpfl_vport->p2p_rx_bufq;
> + rxq->bufq2 = NULL;
> +
> + cpfl_vport->nb_p2p_rxq++;
> + rxq->q_set = true;
> + dev->data->rx_queues[queue_idx] = cpfl_rxq;
> +
> + return 0;
> +
> +err_setup_bufq1:
> + rte_free(bufq1);
> +err_alloc_bufq1:
> + rte_free(rxq);
[Liu, Mingxia] cpfl_rxq should be freed here, right?
> +
> + return ret;
> +}
> +
> +int
> +cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> + uint16_t nb_desc,
> + const struct rte_eth_hairpin_conf *conf) {
> + struct cpfl_vport *cpfl_vport =
> + (struct cpfl_vport *)dev->data->dev_private;
> +
> + struct idpf_vport *vport = &cpfl_vport->base;
> + struct idpf_adapter *adapter_base = vport->adapter;
> + uint16_t logic_qid = cpfl_vport->nb_p2p_txq;
> + struct cpfl_txq_hairpin_info *hairpin_info;
> + struct idpf_hw *hw = &adapter_base->hw;
> + struct cpfl_tx_queue *cpfl_txq;
> + struct idpf_tx_queue *txq, *cq;
> + const struct rte_memzone *mz;
> + uint32_t ring_size;
> + uint16_t peer_port, peer_q;
> +
> + if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
> + PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin
> queue.");
> + return -EINVAL;
> + }
> +
> + if (conf->peer_count != 1) {
> + PMD_INIT_LOG(ERR, "Can't support Tx hairpin queue peer
> count %d", conf->peer_count);
> + return -EINVAL;
> + }
> +
> + peer_port = conf->peers[0].port;
> + peer_q = conf->peers[0].queue;
> +
> + if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
> + nb_desc > CPFL_MAX_RING_DESC ||
> + nb_desc < CPFL_MIN_RING_DESC) {
> + PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is
> invalid",
> + nb_desc);
> + return -EINVAL;
> + }
> +
> + /* Free memory if needed. */
> + if (dev->data->tx_queues[queue_idx]) {
> + cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
> + dev->data->tx_queues[queue_idx] = NULL;
> + }
> +
> + /* Allocate the TX queue data structure. */
> + cpfl_txq = rte_zmalloc_socket("cpfl hairpin txq",
> + sizeof(struct cpfl_tx_queue),
> + RTE_CACHE_LINE_SIZE,
> + SOCKET_ID_ANY);
> + if (!cpfl_txq) {
> + PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue
> structure");
> + return -ENOMEM;
> + }
> +
> + txq = &cpfl_txq->base;
> + hairpin_info = &cpfl_txq->hairpin_info;
> + /* Txq ring length should be 2 times of Tx completion queue size. */
> + txq->nb_tx_desc = nb_desc * 2;
> + txq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info-
> >tx_start_qid, logic_qid);
> + txq->port_id = dev->data->port_id;
> + hairpin_info->hairpin_q = true;
> + hairpin_info->peer_rxp = peer_port;
> + hairpin_info->peer_rxq_id = peer_q;
> +
> + if (conf->manual_bind != 0)
> + cpfl_vport->p2p_manual_bind = true;
> + else
> + cpfl_vport->p2p_manual_bind = false;
> +
> + /* Always Tx hairpin queue allocates Tx HW ring */
> + ring_size = RTE_ALIGN(txq->nb_tx_desc * CPFL_P2P_DESC_LEN,
> + CPFL_DMA_MEM_ALIGN);
> + mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_ring", logic_qid,
> + ring_size + CPFL_P2P_RING_BUF,
> + CPFL_RING_BASE_ALIGN,
> + dev->device->numa_node);
> + if (!mz) {
> + PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
> + rte_free(txq);
[Liu, Mingxia] cpfl_txq should be freed here, right?
> + return -ENOMEM;
> + }
> +
> + txq->tx_ring_phys_addr = mz->iova;
> + txq->desc_ring = mz->addr;
> + txq->mz = mz;
> +
> + cpfl_tx_hairpin_descq_reset(txq);
> + txq->qtx_tail = hw->hw_addr +
> + cpfl_hw_qtail_get(cpfl_vport->p2p_q_chunks_info-
> >tx_qtail_start,
> + logic_qid, cpfl_vport->p2p_q_chunks_info-
> >tx_qtail_spacing);
> + txq->ops = &def_txq_ops;
> +
> + if (cpfl_vport->p2p_tx_complq == NULL) {
> + cq = rte_zmalloc_socket("cpfl hairpin cq",
> + sizeof(struct idpf_tx_queue),
> + RTE_CACHE_LINE_SIZE,
> + dev->device->numa_node);
> + if (!cq) {
> + PMD_INIT_LOG(ERR, "Failed to allocate memory for tx
> queue structure");
[Liu, Mingxia] Before returning, some resources should be freed, such as cpfl_txq, right?
> + return -ENOMEM;
> + }
> +
> + cq->nb_tx_desc = nb_desc;
> + cq->queue_id = cpfl_hw_qid_get(cpfl_vport-
> >p2p_q_chunks_info->tx_compl_start_qid,
> + 0);
> + cq->port_id = dev->data->port_id;
> +
> + /* Tx completion queue always allocates the HW ring */
> + ring_size = RTE_ALIGN(cq->nb_tx_desc * CPFL_P2P_DESC_LEN,
> + CPFL_DMA_MEM_ALIGN);
> + mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_compl_ring",
> logic_qid,
> + ring_size + CPFL_P2P_RING_BUF,
> + CPFL_RING_BASE_ALIGN,
> + dev->device->numa_node);
> + if (!mz) {
> + PMD_INIT_LOG(ERR, "Failed to reserve DMA memory
> for TX completion queue");
> + rte_free(txq);
[Liu, Mingxia] cpfl_txq should be freed here, right? In addition, should the cq resource be released?
^ permalink raw reply [flat|nested] 164+ messages in thread
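For clarity, the pattern the review comments point at is to free the pointer that was actually allocated (the cpfl_* wrapper) rather than its embedded base struct, and to give every earlier allocation its own unwind label. A simplified sketch along those lines (illustrative only, not the final fix adopted in the series):

static struct cpfl_rx_queue *
example_alloc_hairpin_rxq(void)
{
	struct cpfl_rx_queue *cpfl_rxq;
	struct idpf_rx_queue *bufq1;

	cpfl_rxq = rte_zmalloc_socket("cpfl hairpin rxq", sizeof(*cpfl_rxq),
				      RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (cpfl_rxq == NULL)
		return NULL;

	bufq1 = rte_zmalloc_socket("hairpin rx bufq1", sizeof(*bufq1),
				   RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (bufq1 == NULL)
		goto err_alloc_bufq1;

	/* further setup that can fail would unwind through the label below */
	cpfl_rxq->base.bufq1 = bufq1;

	return cpfl_rxq;

err_alloc_bufq1:
	rte_free(cpfl_rxq);	/* free the wrapper that was allocated */
	return NULL;
}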
* RE: [PATCH v4 05/13] net/cpfl: support hairpin queue setup and release
2023-05-30 2:27 ` Liu, Mingxia
@ 2023-05-30 2:49 ` Liu, Mingxia
2023-05-31 10:53 ` Xing, Beilei
0 siblings, 1 reply; 164+ messages in thread
From: Liu, Mingxia @ 2023-05-30 2:49 UTC (permalink / raw)
To: Xing, Beilei, Wu, Jingjing; +Cc: dev, Wang, Xiao W
> -----Original Message-----
> From: Liu, Mingxia
> Sent: Tuesday, May 30, 2023 10:27 AM
> To: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Wang, Xiao W <xiao.w.wang@intel.com>
> Subject: RE: [PATCH v4 05/13] net/cpfl: support hairpin queue setup and release
>
>
>
> > -----Original Message-----
> > From: Xing, Beilei <beilei.xing@intel.com>
> > Sent: Friday, May 26, 2023 3:39 PM
> > To: Wu, Jingjing <jingjing.wu@intel.com>
> > Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> > <beilei.xing@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>
> > Subject: [PATCH v4 05/13] net/cpfl: support hairpin queue setup and
> > release
> >
> > From: Beilei Xing <beilei.xing@intel.com>
> >
> > Support hairpin Rx/Tx queue setup and release.
> >
> > Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> > Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > ---
> > drivers/net/cpfl/cpfl_ethdev.c | 6 +
> > drivers/net/cpfl/cpfl_ethdev.h | 11 +
> > drivers/net/cpfl/cpfl_rxtx.c | 353 +++++++++++++++++++++++-
> > drivers/net/cpfl/cpfl_rxtx.h | 36 +++
> > drivers/net/cpfl/cpfl_rxtx_vec_common.h | 4 +
> > 5 files changed, 409 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/net/cpfl/cpfl_ethdev.c
> > b/drivers/net/cpfl/cpfl_ethdev.c index
> > 40b4515539..b17c538ec2 100644
> > --- a/drivers/net/cpfl/cpfl_ethdev.c
> > +++ b/drivers/net/cpfl/cpfl_ethdev.c
> > @@ -879,6 +879,10 @@ cpfl_dev_close(struct rte_eth_dev *dev)
> > struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
> >
> > cpfl_dev_stop(dev);
> > + if (cpfl_vport->p2p_mp) {
> > + rte_mempool_free(cpfl_vport->p2p_mp);
> > + cpfl_vport->p2p_mp = NULL;
> > + }
> >
> > if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
> > cpfl_p2p_queue_grps_del(vport);
> > @@ -922,6 +926,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
> > .xstats_get_names = cpfl_dev_xstats_get_names,
> > .xstats_reset = cpfl_dev_xstats_reset,
> > .hairpin_cap_get = cpfl_hairpin_cap_get,
> > + .rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
> > + .tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
> > };
> >
> > +int
> > +cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> > + uint16_t nb_desc,
> > + const struct rte_eth_hairpin_conf *conf) {
> > + struct cpfl_vport *cpfl_vport = (struct cpfl_vport *)dev->data->dev_private;
> > + struct idpf_vport *vport = &cpfl_vport->base;
> > + struct idpf_adapter *adapter_base = vport->adapter;
> > + uint16_t logic_qid = cpfl_vport->nb_p2p_rxq;
> > + struct cpfl_rxq_hairpin_info *hairpin_info;
> > + struct cpfl_rx_queue *cpfl_rxq;
> > + struct idpf_rx_queue *bufq1 = NULL;
> > + struct idpf_rx_queue *rxq;
> > + uint16_t peer_port, peer_q;
> > + uint16_t qid;
> > + int ret;
> > +
> > + if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
> > + PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin
> > queue.");
> > + return -EINVAL;
> > + }
> > +
> > + if (conf->peer_count != 1) {
> > + PMD_INIT_LOG(ERR, "Can't support Rx hairpin queue peer
> > count %d", conf->peer_count);
> > + return -EINVAL;
> > + }
> > +
> > + peer_port = conf->peers[0].port;
> > + peer_q = conf->peers[0].queue;
> > +
> > + if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
> > + nb_desc > CPFL_MAX_RING_DESC ||
> > + nb_desc < CPFL_MIN_RING_DESC) {
> > + PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is
> > invalid", nb_desc);
> > + return -EINVAL;
> > + }
> > +
> > + /* Free memory if needed */
> > + if (dev->data->rx_queues[queue_idx]) {
> > + cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
> > + dev->data->rx_queues[queue_idx] = NULL;
> > + }
> > +
> > + /* Setup Rx description queue */
> > + cpfl_rxq = rte_zmalloc_socket("cpfl hairpin rxq",
> > + sizeof(struct cpfl_rx_queue),
> > + RTE_CACHE_LINE_SIZE,
> > + SOCKET_ID_ANY);
> > + if (!cpfl_rxq) {
> > + PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue
> > data structure");
> > + return -ENOMEM;
> > + }
> > +
> > + rxq = &cpfl_rxq->base;
> > + hairpin_info = &cpfl_rxq->hairpin_info;
> > + rxq->nb_rx_desc = nb_desc * 2;
> > + rxq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid, logic_qid);
> > + rxq->port_id = dev->data->port_id;
> > + rxq->adapter = adapter_base;
> > + rxq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
> > + hairpin_info->hairpin_q = true;
> > + hairpin_info->peer_txp = peer_port;
> > + hairpin_info->peer_txq_id = peer_q;
> > +
> > + if (conf->manual_bind != 0)
> > + cpfl_vport->p2p_manual_bind = true;
> > + else
> > + cpfl_vport->p2p_manual_bind = false;
> > +
> > + if (cpfl_vport->p2p_rx_bufq == NULL) {
> > + bufq1 = rte_zmalloc_socket("hairpin rx bufq1",
> > + sizeof(struct idpf_rx_queue),
> > + RTE_CACHE_LINE_SIZE,
> > + SOCKET_ID_ANY);
> > + if (!bufq1) {
> > + PMD_INIT_LOG(ERR, "Failed to allocate memory for
> > hairpin Rx buffer queue 1.");
> > + ret = -ENOMEM;
> > + goto err_alloc_bufq1;
> > + }
> > + qid = 2 * logic_qid;
> > + ret = cpfl_rx_hairpin_bufq_setup(dev, bufq1, qid, nb_desc);
> > + if (ret) {
> > + PMD_INIT_LOG(ERR, "Failed to setup hairpin Rx buffer
> > queue 1");
> > + ret = -EINVAL;
> > + goto err_setup_bufq1;
> > + }
> > + cpfl_vport->p2p_rx_bufq = bufq1;
> > + }
> > +
> > + rxq->bufq1 = cpfl_vport->p2p_rx_bufq;
> > + rxq->bufq2 = NULL;
> > +
> > + cpfl_vport->nb_p2p_rxq++;
> > + rxq->q_set = true;
> > + dev->data->rx_queues[queue_idx] = cpfl_rxq;
> > +
> > + return 0;
> > +
> > +err_setup_bufq1:
> > + rte_free(bufq1);
> > +err_alloc_bufq1:
> > + rte_free(rxq);
> [Liu, Mingxia] Shouldn't cpfl_rxq be freed here instead of rxq?
> > +
> > + return ret;
> > +}
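A short sketch of what the comment above suggests for the error labels; only the final free changes, since cpfl_rxq is the pointer that was actually returned by rte_zmalloc_socket():

err_setup_bufq1:
	rte_free(bufq1);
err_alloc_bufq1:
	rte_free(cpfl_rxq);	/* rxq is only the embedded base of cpfl_rxq */

	return ret;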
> > +
> > +int
> > +cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> > + uint16_t nb_desc,
> > + const struct rte_eth_hairpin_conf *conf) {
> > + struct cpfl_vport *cpfl_vport =
> > + (struct cpfl_vport *)dev->data->dev_private;
> > +
> > + struct idpf_vport *vport = &cpfl_vport->base;
> > + struct idpf_adapter *adapter_base = vport->adapter;
> > + uint16_t logic_qid = cpfl_vport->nb_p2p_txq;
> > + struct cpfl_txq_hairpin_info *hairpin_info;
> > + struct idpf_hw *hw = &adapter_base->hw;
> > + struct cpfl_tx_queue *cpfl_txq;
> > + struct idpf_tx_queue *txq, *cq;
> > + const struct rte_memzone *mz;
> > + uint32_t ring_size;
> > + uint16_t peer_port, peer_q;
> > +
> > + if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
> > + PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin
> > queue.");
> > + return -EINVAL;
> > + }
> > +
> > + if (conf->peer_count != 1) {
> > + PMD_INIT_LOG(ERR, "Can't support Tx hairpin queue peer
> > count %d", conf->peer_count);
> > + return -EINVAL;
> > + }
> > +
> > + peer_port = conf->peers[0].port;
> > + peer_q = conf->peers[0].queue;
> > +
> > + if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
> > + nb_desc > CPFL_MAX_RING_DESC ||
> > + nb_desc < CPFL_MIN_RING_DESC) {
> > + PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is
> > invalid",
> > + nb_desc);
> > + return -EINVAL;
> > + }
> > +
> > + /* Free memory if needed. */
> > + if (dev->data->tx_queues[queue_idx]) {
> > + cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
> > + dev->data->tx_queues[queue_idx] = NULL;
> > + }
> > +
> > + /* Allocate the TX queue data structure. */
> > + cpfl_txq = rte_zmalloc_socket("cpfl hairpin txq",
> > + sizeof(struct cpfl_tx_queue),
> > + RTE_CACHE_LINE_SIZE,
> > + SOCKET_ID_ANY);
> > + if (!cpfl_txq) {
> > + PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue
> > structure");
> > + return -ENOMEM;
> > + }
> > +
> > + txq = &cpfl_txq->base;
> > + hairpin_info = &cpfl_txq->hairpin_info;
> > + /* Txq ring length should be 2 times of Tx completion queue size. */
> > + txq->nb_tx_desc = nb_desc * 2;
> > + txq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_start_qid, logic_qid);
> > + txq->port_id = dev->data->port_id;
> > + hairpin_info->hairpin_q = true;
> > + hairpin_info->peer_rxp = peer_port;
> > + hairpin_info->peer_rxq_id = peer_q;
> > +
> > + if (conf->manual_bind != 0)
> > + cpfl_vport->p2p_manual_bind = true;
> > + else
> > + cpfl_vport->p2p_manual_bind = false;
> > +
> > + /* Always Tx hairpin queue allocates Tx HW ring */
> > + ring_size = RTE_ALIGN(txq->nb_tx_desc * CPFL_P2P_DESC_LEN,
> > + CPFL_DMA_MEM_ALIGN);
> > + mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_ring", logic_qid,
> > + ring_size + CPFL_P2P_RING_BUF,
> > + CPFL_RING_BASE_ALIGN,
> > + dev->device->numa_node);
> > + if (!mz) {
> > + PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
> > + rte_free(txq);
> [Liu, Mingxia] Shouldn't cpfl_txq be freed here instead of txq?
> > + return -ENOMEM;
> > + }
> > +
> > + txq->tx_ring_phys_addr = mz->iova;
> > + txq->desc_ring = mz->addr;
> > + txq->mz = mz;
> > +
> > + cpfl_tx_hairpin_descq_reset(txq);
> > + txq->qtx_tail = hw->hw_addr +
> > + cpfl_hw_qtail_get(cpfl_vport->p2p_q_chunks_info->tx_qtail_start,
> > + logic_qid, cpfl_vport->p2p_q_chunks_info->tx_qtail_spacing);
> > + txq->ops = &def_txq_ops;
> > +
> > + if (cpfl_vport->p2p_tx_complq == NULL) {
> > + cq = rte_zmalloc_socket("cpfl hairpin cq",
> > + sizeof(struct idpf_tx_queue),
> > + RTE_CACHE_LINE_SIZE,
> > + dev->device->numa_node);
> > + if (!cq) {
> > + PMD_INIT_LOG(ERR, "Failed to allocate memory for tx
> > queue structure");
> [Liu, Mingxia] Before returning, some resources should be freed here, such as cpfl_txq, right?
[Liu, Mingxia] In addition, should txq->mz be freed before releasing cpfl_txq?
> > + return -ENOMEM;
> > + }
> > +
> > + cq->nb_tx_desc = nb_desc;
> > + cq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_compl_start_qid, 0);
> > + cq->port_id = dev->data->port_id;
> > +
> > + /* Tx completion queue always allocates the HW ring */
> > + ring_size = RTE_ALIGN(cq->nb_tx_desc * CPFL_P2P_DESC_LEN,
> > + CPFL_DMA_MEM_ALIGN);
> > + mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_compl_ring",
> > logic_qid,
> > + ring_size + CPFL_P2P_RING_BUF,
> > + CPFL_RING_BASE_ALIGN,
> > + dev->device->numa_node);
> > + if (!mz) {
> > + PMD_INIT_LOG(ERR, "Failed to reserve DMA memory
> > for TX completion queue");
> > + rte_free(txq);
>
> [Liu, Mingxia] Shouldn't cpfl_txq be freed here instead of txq? In addition, should the cq resource
> be released?
[Liu, Mingxia] In addition, should txq->mz be freed before releasing cpfl_txq?
>
>
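For the ordering question, a sketch of how that failure branch could look (a fragment only, using the names in the quoted code; whether the completion queue may be dropped here depends on whether it has already been published as cpfl_vport->p2p_tx_complq):

	if (!mz) {
		PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX completion queue");
		rte_free(cq);			/* cq ring was not reserved yet */
		rte_memzone_free(txq->mz);	/* Tx ring reserved earlier */
		rte_free(cpfl_txq);		/* free the container last */
		return -ENOMEM;
	}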
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH v4 09/13] net/cpfl: support hairpin queue start/stop
2023-05-26 7:38 ` [PATCH v4 09/13] net/cpfl: support hairpin queue start/stop beilei.xing
@ 2023-05-30 3:30 ` Liu, Mingxia
2023-05-31 10:53 ` Xing, Beilei
0 siblings, 1 reply; 164+ messages in thread
From: Liu, Mingxia @ 2023-05-30 3:30 UTC (permalink / raw)
To: Xing, Beilei, Wu, Jingjing; +Cc: dev, Wang, Xiao W
> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Friday, May 26, 2023 3:39 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>
> Subject: [PATCH v4 09/13] net/cpfl: support hairpin queue start/stop
>
> From: Beilei Xing <beilei.xing@intel.com>
>
> This patch supports Rx/Tx hairpin queue start/stop.
>
> Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/net/cpfl/cpfl_ethdev.c | 41 +++++++++
> drivers/net/cpfl/cpfl_rxtx.c | 151 +++++++++++++++++++++++++++++----
> drivers/net/cpfl/cpfl_rxtx.h | 14 +++
> 3 files changed, 188 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c index
> a06def06d0..8035878602 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.c
> +++ b/drivers/net/cpfl/cpfl_ethdev.c
> @@ -896,6 +896,47 @@ cpfl_start_queues(struct rte_eth_dev *dev)
> }
> }
>
> + /* For non-manual bind hairpin queues, enable Tx queue and Rx queue,
> + * then enable Tx completion queue and Rx buffer queue.
> + */
> + for (i = 0; i < dev->data->nb_tx_queues; i++) {
[Liu, Mingxia] Better to use for (i = cpfl_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++), because when i < cpfl_vport->nb_data_txq, (cpfl_txq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) must be false; otherwise (i - cpfl_vport->nb_data_txq) would be negative.
> + cpfl_txq = dev->data->tx_queues[i];
> + if (cpfl_txq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) {
> + err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
> + i - cpfl_vport->nb_data_txq,
> + false, true);
> + if (err)
> + PMD_DRV_LOG(ERR, "Failed to switch hairpin
> TX queue %u on",
> + i);
> + else
> + cpfl_txq->base.q_started = true;
> + }
> + }
> +
> + for (i = 0; i < dev->data->nb_rx_queues; i++) {
[Liu, Mingxia] Better to use for (i = cpfl_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++), because when i < cpfl_vport->nb_data_rxq, (cpfl_rxq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) must be false; otherwise (i - cpfl_vport->nb_data_rxq) would be negative.
> + cpfl_rxq = dev->data->rx_queues[i];
> + if (cpfl_rxq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) {
> + err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
> + i - cpfl_vport->nb_data_rxq,
> + true, true);
> + if (err)
> + PMD_DRV_LOG(ERR, "Failed to switch hairpin
> RX queue %u on",
> + i);
> + else
> + cpfl_rxq->base.q_started = true;
> + }
> + }
> +
> + if (!cpfl_vport->p2p_manual_bind &&
> + cpfl_vport->p2p_tx_complq != NULL &&
> + cpfl_vport->p2p_rx_bufq != NULL) {
> + err = cpfl_switch_hairpin_bufq_complq(cpfl_vport, true);
> + if (err != 0) {
> + PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx
> complq and Rx bufq");
> + return err;
> + }
> + }
> +
> return err;
> }
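A sketch of the loop bounds the two comments above propose, so the data queues at the front of the array are skipped and the hairpin logical index stays non-negative. This only illustrates the suggestion, it is not the wording of a later revision; the Rx loop is the same with nb_data_rxq and nb_rx_queues.

	/* Hairpin queues are appended after the data queues, so start at
	 * nb_data_txq and the (i - nb_data_txq) logical index never goes
	 * negative.
	 */
	for (i = cpfl_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
		cpfl_txq = dev->data->tx_queues[i];
		if (!cpfl_txq->hairpin_info.hairpin_q || cpfl_vport->p2p_manual_bind)
			continue;
		err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
						     i - cpfl_vport->nb_data_txq,
						     false, true);
		if (err)
			PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on", i);
		else
			cpfl_txq->base.q_started = true;
	}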
>
> diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c index
> 702054d1c5..38c48ad8c7 100644
> --- a/drivers/net/cpfl/cpfl_rxtx.c
> +++ b/drivers/net/cpfl/cpfl_rxtx.c
> @@ -991,6 +991,81 @@ cpfl_hairpin_txq_config(struct idpf_vport *vport,
> struct cpfl_tx_queue *cpfl_txq
> return idpf_vc_txq_config_by_info(vport, txq_info, 1); }
>
> +int
> +cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on)
> +{
> + struct idpf_vport *vport = &cpfl_vport->base;
> + uint32_t type;
> + int err, queue_id;
> +
> + type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
> + queue_id = cpfl_vport->p2p_tx_complq->queue_id;
> + err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
> + if (err)
> + return err;
> +
> + type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
> + queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
> + err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
> +
> + return err;
> +}
> +
> +int
> +cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t
> logic_qid,
> + bool rx, bool on)
> +{
> + struct idpf_vport *vport = &cpfl_vport->base;
> + uint32_t type;
> + int err, queue_id;
> +
> + type = rx ? VIRTCHNL2_QUEUE_TYPE_RX :
> VIRTCHNL2_QUEUE_TYPE_TX;
> +
> + if (type == VIRTCHNL2_QUEUE_TYPE_RX)
> + queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid, logic_qid);
> + else
> + queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_start_qid, logic_qid);
> + err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
> + if (err)
> + return err;
> +
> + return err;
> +}
> +
> +static int
> +cpfl_alloc_split_p2p_rxq_mbufs(struct idpf_rx_queue *rxq) {
> + volatile struct virtchnl2_p2p_rx_buf_desc *rxd;
> + struct rte_mbuf *mbuf = NULL;
> + uint64_t dma_addr;
> + uint16_t i;
> +
> + for (i = 0; i < rxq->nb_rx_desc; i++) {
> + mbuf = rte_mbuf_raw_alloc(rxq->mp);
> + if (unlikely(!mbuf)) {
> + PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
> + return -ENOMEM;
> + }
> +
> + rte_mbuf_refcnt_set(mbuf, 1);
> + mbuf->next = NULL;
> + mbuf->data_off = RTE_PKTMBUF_HEADROOM;
> + mbuf->nb_segs = 1;
> + mbuf->port = rxq->port_id;
> + dma_addr =
> rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
> +
> + rxd = &((volatile struct virtchnl2_p2p_rx_buf_desc *)(rxq->rx_ring))[i];
> + rxd->reserve0 = 0;
> + rxd->pkt_addr = dma_addr;
> + }
> +
> + rxq->nb_rx_hold = 0;
> + /* The value written in the RX buffer queue tail register, must be a
> multiple of 8.*/
> + rxq->rx_tail = rxq->nb_rx_desc - CPFL_HAIRPIN_Q_TAIL_AUX_VALUE;
> +
> + return 0;
> +}
> +
> int
> cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id) { @@ -
> 1044,22 +1119,31 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t
> rx_queue_id)
> IDPF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
> } else {
> /* Split queue */
> - err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
> - if (err != 0) {
> - PMD_DRV_LOG(ERR, "Failed to allocate RX buffer
> queue mbuf");
> - return err;
> - }
> - err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
> - if (err != 0) {
> - PMD_DRV_LOG(ERR, "Failed to allocate RX buffer
> queue mbuf");
> - return err;
> + if (cpfl_rxq->hairpin_info.hairpin_q) {
> + err = cpfl_alloc_split_p2p_rxq_mbufs(rxq->bufq1);
> + if (err != 0) {
> + PMD_DRV_LOG(ERR, "Failed to allocate p2p RX
> buffer queue mbuf");
> + return err;
> + }
> + } else {
> + err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
> + if (err != 0) {
> + PMD_DRV_LOG(ERR, "Failed to allocate RX
> buffer queue mbuf");
> + return err;
> + }
> + err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
> + if (err != 0) {
> + PMD_DRV_LOG(ERR, "Failed to allocate RX
> buffer queue mbuf");
> + return err;
> + }
> }
>
> rte_wmb();
>
> /* Init the RX tail register. */
> IDPF_PCI_REG_WRITE(rxq->bufq1->qrx_tail, rxq->bufq1->rx_tail);
> - IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
> + if (rxq->bufq2)
> + IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
> }
>
> return err;
> @@ -1166,7 +1250,12 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev,
> uint16_t rx_queue_id)
> return -EINVAL;
>
> cpfl_rxq = dev->data->rx_queues[rx_queue_id];
> - err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
> + if (cpfl_rxq->hairpin_info.hairpin_q)
> + err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
> + rx_queue_id - cpfl_vport->nb_data_txq,
> + true, false);
> + else
> + err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
> if (err != 0) {
> PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
> rx_queue_id);
> @@ -1180,10 +1269,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev,
> uint16_t rx_queue_id)
> idpf_qc_single_rx_queue_reset(rxq);
> } else {
> rxq->bufq1->ops->release_mbufs(rxq->bufq1);
> - rxq->bufq2->ops->release_mbufs(rxq->bufq2);
> - idpf_qc_split_rx_queue_reset(rxq);
> + if (rxq->bufq2)
> + rxq->bufq2->ops->release_mbufs(rxq->bufq2);
> + if (cpfl_rxq->hairpin_info.hairpin_q) {
> + cpfl_rx_hairpin_descq_reset(rxq);
> + cpfl_rx_hairpin_bufq_reset(rxq->bufq1);
> + } else {
> + idpf_qc_split_rx_queue_reset(rxq);
> + }
> }
> - dev->data->rx_queue_state[rx_queue_id] =
> RTE_ETH_QUEUE_STATE_STOPPED;
> + if (!cpfl_rxq->hairpin_info.hairpin_q)
> + dev->data->rx_queue_state[rx_queue_id] =
> RTE_ETH_QUEUE_STATE_STOPPED;
>
> return 0;
> }
> @@ -1202,7 +1298,12 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev,
> uint16_t tx_queue_id)
>
> cpfl_txq = dev->data->tx_queues[tx_queue_id];
>
> - err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
> + if (cpfl_txq->hairpin_info.hairpin_q)
> + err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
> + tx_queue_id - cpfl_vport->nb_data_txq,
> + false, false);
> + else
> + err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
> if (err != 0) {
> PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
> tx_queue_id);
> @@ -1215,10 +1316,17 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev,
> uint16_t tx_queue_id)
> if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
> idpf_qc_single_tx_queue_reset(txq);
> } else {
> - idpf_qc_split_tx_descq_reset(txq);
> - idpf_qc_split_tx_complq_reset(txq->complq);
> + if (cpfl_txq->hairpin_info.hairpin_q) {
> + cpfl_tx_hairpin_descq_reset(txq);
> + cpfl_tx_hairpin_complq_reset(txq->complq);
> + } else {
> + idpf_qc_split_tx_descq_reset(txq);
> + idpf_qc_split_tx_complq_reset(txq->complq);
> + }
> }
> - dev->data->tx_queue_state[tx_queue_id] =
> RTE_ETH_QUEUE_STATE_STOPPED;
> +
> + if (!cpfl_txq->hairpin_info.hairpin_q)
> + dev->data->tx_queue_state[tx_queue_id] =
> RTE_ETH_QUEUE_STATE_STOPPED;
>
> return 0;
> }
> @@ -1238,10 +1346,17 @@ cpfl_dev_tx_queue_release(struct rte_eth_dev
> *dev, uint16_t qid) void cpfl_stop_queues(struct rte_eth_dev *dev) {
> + struct cpfl_vport *cpfl_vport =
> + (struct cpfl_vport *)dev->data->dev_private;
> struct cpfl_rx_queue *cpfl_rxq;
> struct cpfl_tx_queue *cpfl_txq;
> int i;
>
> + if (cpfl_vport->p2p_rx_bufq != NULL) {
> + if (cpfl_switch_hairpin_bufq_complq(cpfl_vport, false) != 0)
> + PMD_DRV_LOG(ERR, "Failed to stop hairpin Tx complq
> and Rx bufq");
> + }
> +
> for (i = 0; i < dev->data->nb_rx_queues; i++) {
> cpfl_rxq = dev->data->rx_queues[i];
> if (cpfl_rxq == NULL)
> diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h index
> 872ebc1bfd..42dfd07155 100644
> --- a/drivers/net/cpfl/cpfl_rxtx.h
> +++ b/drivers/net/cpfl/cpfl_rxtx.h
> @@ -41,6 +41,17 @@
>
> #define CPFL_RX_BUF_STRIDE 64
>
> +/* The value written in the RX buffer queue tail register,
> + * and in WritePTR field in the TX completion queue context,
> + * must be a multiple of 8.
> + */
> +#define CPFL_HAIRPIN_Q_TAIL_AUX_VALUE 8
> +
> +struct virtchnl2_p2p_rx_buf_desc {
> + __le64 reserve0;
> + __le64 pkt_addr; /* Packet buffer address */ };
> +
> struct cpfl_rxq_hairpin_info {
> bool hairpin_q; /* if rx queue is a hairpin queue */
> uint16_t peer_txp;
> @@ -102,4 +113,7 @@ int cpfl_hairpin_tx_complq_config(struct cpfl_vport
> *cpfl_vport); int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct
> cpfl_tx_queue *cpfl_txq); int cpfl_hairpin_rx_bufq_config(struct cpfl_vport
> *cpfl_vport); int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct
> cpfl_rx_queue *cpfl_rxq);
> +int cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool
> +on); int cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t
> qid,
> + bool rx, bool on);
> #endif /* _CPFL_RXTX_H_ */
> --
> 2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH v4 13/13] net/cpfl: support hairpin bind/unbind
2023-05-26 7:38 ` [PATCH v4 13/13] net/cpfl: support hairpin bind/unbind beilei.xing
@ 2023-05-30 3:59 ` Liu, Mingxia
2023-05-31 10:54 ` Xing, Beilei
0 siblings, 1 reply; 164+ messages in thread
From: Liu, Mingxia @ 2023-05-30 3:59 UTC (permalink / raw)
To: Xing, Beilei, Wu, Jingjing; +Cc: dev, Wang, Xiao W
> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Friday, May 26, 2023 3:39 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>
> Subject: [PATCH v4 13/13] net/cpfl: support hairpin bind/unbind
>
> From: Beilei Xing <beilei.xing@intel.com>
>
> This patch supports hairpin_bind/unbind ops.
>
> Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/net/cpfl/cpfl_ethdev.c | 137 +++++++++++++++++++++++++++++++++
> drivers/net/cpfl/cpfl_rxtx.c | 28 +++++++
> drivers/net/cpfl/cpfl_rxtx.h | 2 +
> 3 files changed, 167 insertions(+)
>
> diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c index
> d6dc1672f1..4b70441e27 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.c
> +++ b/drivers/net/cpfl/cpfl_ethdev.c
> @@ -1114,6 +1114,141 @@ cpfl_hairpin_get_peer_ports(struct rte_eth_dev
> *dev, uint16_t *peer_ports,
> return j;
> }
>
>
> static int
> diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c index
> 38c48ad8c7..ef83a03c2b 100644
> --- a/drivers/net/cpfl/cpfl_rxtx.c
> +++ b/drivers/net/cpfl/cpfl_rxtx.c
> @@ -1011,6 +1011,34 @@ cpfl_switch_hairpin_bufq_complq(struct cpfl_vport
> *cpfl_vport, bool on)
> return err;
> }
>
> +int
> +cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on) {
> + struct idpf_vport *vport = &cpfl_vport->base;
> + uint32_t type;
> + int err, queue_id;
> +
> + type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
> + queue_id = cpfl_vport->p2p_tx_complq->queue_id;
> + err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
> +
> + return err;
> +}
> +
> +int
> +cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on) {
> + struct idpf_vport *vport = &cpfl_vport->base;
> + uint32_t type;
> + int err, queue_id;
> +
> + type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
> + queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
> + err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
> +
> + return err;
> +}
> +
[Liu, Mingxia] Can cpfl_switch_hairpin_bufq_complq() in patch 9/13 be optimized by calling cpfl_switch_hairpin_complq() and cpfl_switch_hairpin_bufq()?
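A sketch of the refactor suggested here, with the combined helper simply delegating to the two single-queue helpers so the virtchnl calls stay identical:

int
cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on)
{
	int err;

	err = cpfl_switch_hairpin_complq(cpfl_vport, on);
	if (err != 0)
		return err;

	return cpfl_switch_hairpin_bufq(cpfl_vport, on);
}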
> int
> cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t
> logic_qid,
> bool rx, bool on)
> diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h index
> 42dfd07155..86e97541c4 100644
> --- a/drivers/net/cpfl/cpfl_rxtx.h
> +++ b/drivers/net/cpfl/cpfl_rxtx.h
> @@ -114,6 +114,8 @@ int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl
> int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
> int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
> int cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on);
> +int cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on);
> +int cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on);
> int cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t qid,
> bool rx, bool on);
> #endif /* _CPFL_RXTX_H_ */
> --
> 2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v5 00/13] net/cpfl: add hairpin queue support
2023-05-26 7:38 ` [PATCH v4 00/13] net/cpfl: add hairpin queue support beilei.xing
` (12 preceding siblings ...)
2023-05-26 7:38 ` [PATCH v4 13/13] net/cpfl: support hairpin bind/unbind beilei.xing
@ 2023-05-31 10:18 ` beilei.xing
2023-05-31 10:18 ` [PATCH v5 01/13] net/cpfl: refine structures beilei.xing
` (13 more replies)
13 siblings, 14 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:18 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patchset adds hairpin queue support.
v2 changes:
- change hairpin rx queues configuration sequence.
- code refine.
v3 changes:
- Refine the patchset based on the latest code.
v4 changes:
- Remove hairpin rx buffer queue's sw_ring.
- Change hairpin rx queues configuration sequence in cpfl_hairpin_bind function.
- Refine hairpin queue setup and release.
v5 changes:
- Fix memory leak during queue setup.
- Refine hairpin Rxq/Txq start/stop.
Beilei Xing (13):
net/cpfl: refine structures
common/idpf: support queue groups add/delete
net/cpfl: add haipin queue group during vport init
net/cpfl: support hairpin queue capbility get
net/cpfl: support hairpin queue setup and release
common/idpf: add queue config API
net/cpfl: support hairpin queue configuration
common/idpf: add switch queue API
net/cpfl: support hairpin queue start/stop
common/idpf: add irq map config API
net/cpfl: enable write back based on ITR expire
net/cpfl: support peer ports get
net/cpfl: support hairpin bind/unbind
drivers/common/idpf/idpf_common_device.c | 75 ++
drivers/common/idpf/idpf_common_device.h | 4 +
drivers/common/idpf/idpf_common_virtchnl.c | 138 +++-
drivers/common/idpf/idpf_common_virtchnl.h | 18 +
drivers/common/idpf/version.map | 6 +
drivers/net/cpfl/cpfl_ethdev.c | 610 ++++++++++++++--
drivers/net/cpfl/cpfl_ethdev.h | 35 +-
drivers/net/cpfl/cpfl_rxtx.c | 781 +++++++++++++++++++--
drivers/net/cpfl/cpfl_rxtx.h | 76 ++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 21 +-
10 files changed, 1645 insertions(+), 119 deletions(-)
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v5 01/13] net/cpfl: refine structures
2023-05-31 10:18 ` [PATCH v5 00/13] net/cpfl: add hairpin queue support beilei.xing
@ 2023-05-31 10:18 ` beilei.xing
2023-05-31 10:18 ` [PATCH v5 02/13] common/idpf: support queue groups add/delete beilei.xing
` (12 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:18 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch refines some structures (cpfl_rx_queue, cpfl_tx_queue and
cpfl_vport) to support hairpin queues.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 85 +++++++-----
drivers/net/cpfl/cpfl_ethdev.h | 6 +-
drivers/net/cpfl/cpfl_rxtx.c | 175 +++++++++++++++++-------
drivers/net/cpfl/cpfl_rxtx.h | 8 ++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 17 +--
5 files changed, 196 insertions(+), 95 deletions(-)
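In condensed form, the pattern applied throughout this patch is the wrapper below: dev_private now carries the cpfl container, and the common idpf state is reached through its base member. This is only a sketch for orientation; cpfl_example_op is a made-up name, not a function in the driver.

struct cpfl_vport {
	struct idpf_vport base;	/* cpfl-specific fields are added by later patches */
};

static int
cpfl_example_op(struct rte_eth_dev *dev)
{
	struct cpfl_vport *cpfl_vport = dev->data->dev_private;
	struct idpf_vport *vport = &cpfl_vport->base;

	/* common idpf helpers keep taking 'vport';
	 * cpfl extensions hang off 'cpfl_vport'.
	 */
	return vport->vport_id;
}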
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 7528a14d05..e587155db6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -124,7 +124,8 @@ static int
cpfl_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct rte_eth_link new_link;
unsigned int i;
@@ -156,7 +157,8 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
static int
cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
dev_info->max_rx_queues = base->caps.max_rx_q;
@@ -216,7 +218,8 @@ cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
static int
cpfl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
/* mtu setting is forbidden if port is start */
if (dev->data->dev_started) {
@@ -256,12 +259,12 @@ static uint64_t
cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
{
uint64_t mbuf_alloc_failed = 0;
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i = 0;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- mbuf_alloc_failed += __atomic_load_n(&rxq->rx_stats.mbuf_alloc_failed,
+ cpfl_rxq = dev->data->rx_queues[i];
+ mbuf_alloc_failed += __atomic_load_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,
__ATOMIC_RELAXED);
}
@@ -271,8 +274,8 @@ cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
static int
cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
int ret;
@@ -305,20 +308,20 @@ cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
static void
cpfl_reset_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- __atomic_store_n(&rxq->rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
+ cpfl_rxq = dev->data->rx_queues[i];
+ __atomic_store_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
}
}
static int
cpfl_dev_stats_reset(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
int ret;
@@ -343,8 +346,8 @@ static int cpfl_dev_xstats_reset(struct rte_eth_dev *dev)
static int cpfl_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned int n)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
unsigned int i;
int ret;
@@ -459,7 +462,8 @@ cpfl_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t idx, shift;
int ret = 0;
@@ -498,7 +502,8 @@ cpfl_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t idx, shift;
int ret = 0;
@@ -536,7 +541,8 @@ static int
cpfl_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
int ret = 0;
@@ -601,7 +607,8 @@ static int
cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
int ret = 0;
@@ -638,7 +645,8 @@ cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,
static int
cpfl_dev_configure(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct rte_eth_conf *conf = &dev->data->dev_conf;
struct idpf_adapter *base = vport->adapter;
int ret;
@@ -710,7 +718,8 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
static int
cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
uint16_t nb_rx_queues = dev->data->nb_rx_queues;
return idpf_vport_irq_map_config(vport, nb_rx_queues);
@@ -719,14 +728,14 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
static int
cpfl_start_queues(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
- struct idpf_tx_queue *txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
int err = 0;
int i;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL || txq->tx_deferred_start)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
continue;
err = cpfl_tx_queue_start(dev, i);
if (err != 0) {
@@ -736,8 +745,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- if (rxq == NULL || rxq->rx_deferred_start)
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
continue;
err = cpfl_rx_queue_start(dev, i);
if (err != 0) {
@@ -752,7 +761,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)
static int
cpfl_dev_start(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(base);
uint16_t num_allocated_vectors = base->caps.num_allocated_vectors;
@@ -813,7 +823,8 @@ cpfl_dev_start(struct rte_eth_dev *dev)
static int
cpfl_dev_stop(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
if (dev->data->dev_started == 0)
return 0;
@@ -832,7 +843,8 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
static int
cpfl_dev_close(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
@@ -842,7 +854,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
adapter->cur_vport_nb--;
dev->data->dev_private = NULL;
adapter->vports[vport->sw_idx] = NULL;
- rte_free(vport);
+ rte_free(cpfl_vport);
return 0;
}
@@ -1047,7 +1059,7 @@ cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id)
int i;
for (i = 0; i < adapter->cur_vport_nb; i++) {
- vport = adapter->vports[i];
+ vport = &adapter->vports[i]->base;
if (vport->vport_id != vport_id)
continue;
else
@@ -1275,7 +1287,8 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter)
static int
cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_vport_param *param = init_params;
struct cpfl_adapter_ext *adapter = param->adapter;
/* for sending create vport virtchnl msg prepare */
@@ -1300,7 +1313,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
goto err;
}
- adapter->vports[param->idx] = vport;
+ adapter->vports[param->idx] = cpfl_vport;
adapter->cur_vports |= RTE_BIT32(param->devarg_id);
adapter->cur_vport_nb++;
@@ -1415,7 +1428,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
snprintf(name, sizeof(name), "cpfl_%s_vport_0",
pci_dev->device.name);
retval = rte_eth_dev_create(&pci_dev->device, name,
- sizeof(struct idpf_vport),
+ sizeof(struct cpfl_vport),
NULL, NULL, cpfl_dev_vport_init,
&vport_param);
if (retval != 0)
@@ -1433,7 +1446,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
pci_dev->device.name,
devargs.req_vports[i]);
retval = rte_eth_dev_create(&pci_dev->device, name,
- sizeof(struct idpf_vport),
+ sizeof(struct cpfl_vport),
NULL, NULL, cpfl_dev_vport_init,
&vport_param);
if (retval != 0)
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 200dfcac02..81fe9ac4c3 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -69,13 +69,17 @@ struct cpfl_devargs {
uint16_t req_vport_nb;
};
+struct cpfl_vport {
+ struct idpf_vport base;
+};
+
struct cpfl_adapter_ext {
TAILQ_ENTRY(cpfl_adapter_ext) next;
struct idpf_adapter base;
char name[CPFL_ADAPTER_NAME_LEN];
- struct idpf_vport **vports;
+ struct cpfl_vport **vports;
uint16_t max_vport_nb;
uint16_t cur_vports; /* bit mask of created vport */
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 75021c3c54..04a51b8d15 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -128,7 +128,8 @@ cpfl_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,
uint16_t nb_desc, unsigned int socket_id,
struct rte_mempool *mp, uint8_t bufq_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct idpf_hw *hw = &base->hw;
const struct rte_memzone *mz;
@@ -220,15 +221,69 @@ cpfl_rx_split_bufq_release(struct idpf_rx_queue *bufq)
rte_free(bufq);
}
+static void
+cpfl_rx_queue_release(void *rxq)
+{
+ struct cpfl_rx_queue *cpfl_rxq = rxq;
+ struct idpf_rx_queue *q = NULL;
+
+ if (cpfl_rxq == NULL)
+ return;
+
+ q = &cpfl_rxq->base;
+
+ /* Split queue */
+ if (!q->adapter->is_rx_singleq) {
+ if (q->bufq2)
+ cpfl_rx_split_bufq_release(q->bufq2);
+
+ if (q->bufq1)
+ cpfl_rx_split_bufq_release(q->bufq1);
+
+ rte_free(cpfl_rxq);
+ return;
+ }
+
+ /* Single queue */
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(cpfl_rxq);
+}
+
+static void
+cpfl_tx_queue_release(void *txq)
+{
+ struct cpfl_tx_queue *cpfl_txq = txq;
+ struct idpf_tx_queue *q = NULL;
+
+ if (cpfl_txq == NULL)
+ return;
+
+ q = &cpfl_txq->base;
+
+ if (q->complq) {
+ rte_memzone_free(q->complq->mz);
+ rte_free(q->complq);
+ }
+
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(cpfl_txq);
+}
+
int
cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct idpf_hw *hw = &base->hw;
+ struct cpfl_rx_queue *cpfl_rxq;
const struct rte_memzone *mz;
struct idpf_rx_queue *rxq;
uint16_t rx_free_thresh;
@@ -248,21 +303,23 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Free memory if needed */
if (dev->data->rx_queues[queue_idx] != NULL) {
- idpf_qc_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
dev->data->rx_queues[queue_idx] = NULL;
}
/* Setup Rx queue */
- rxq = rte_zmalloc_socket("cpfl rxq",
- sizeof(struct idpf_rx_queue),
+ cpfl_rxq = rte_zmalloc_socket("cpfl rxq",
+ sizeof(struct cpfl_rx_queue),
RTE_CACHE_LINE_SIZE,
socket_id);
- if (rxq == NULL) {
+ if (cpfl_rxq == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
ret = -ENOMEM;
goto err_rxq_alloc;
}
+ rxq = &cpfl_rxq->base;
+
is_splitq = !!(vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
rxq->mp = mp;
@@ -329,7 +386,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
rxq->q_set = true;
- dev->data->rx_queues[queue_idx] = rxq;
+ dev->data->rx_queues[queue_idx] = cpfl_rxq;
return 0;
@@ -349,7 +406,8 @@ cpfl_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq,
uint16_t queue_idx, uint16_t nb_desc,
unsigned int socket_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
const struct rte_memzone *mz;
struct idpf_tx_queue *cq;
int ret;
@@ -397,9 +455,11 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t tx_rs_thresh, tx_free_thresh;
+ struct cpfl_tx_queue *cpfl_txq;
struct idpf_hw *hw = &base->hw;
const struct rte_memzone *mz;
struct idpf_tx_queue *txq;
@@ -419,21 +479,23 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Free memory if needed. */
if (dev->data->tx_queues[queue_idx] != NULL) {
- idpf_qc_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
dev->data->tx_queues[queue_idx] = NULL;
}
/* Allocate the TX queue data structure. */
- txq = rte_zmalloc_socket("cpfl txq",
- sizeof(struct idpf_tx_queue),
+ cpfl_txq = rte_zmalloc_socket("cpfl txq",
+ sizeof(struct cpfl_tx_queue),
RTE_CACHE_LINE_SIZE,
socket_id);
- if (txq == NULL) {
+ if (cpfl_txq == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
ret = -ENOMEM;
goto err_txq_alloc;
}
+ txq = &cpfl_txq->base;
+
is_splitq = !!(vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
txq->nb_tx_desc = nb_desc;
@@ -487,7 +549,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
queue_idx * vport->chunks_info.tx_qtail_spacing);
txq->ops = &def_txq_ops;
txq->q_set = true;
- dev->data->tx_queues[queue_idx] = txq;
+ dev->data->tx_queues[queue_idx] = cpfl_txq;
return 0;
@@ -503,6 +565,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
+ struct cpfl_rx_queue *cpfl_rxq;
struct idpf_rx_queue *rxq;
uint16_t max_pkt_len;
uint32_t frame_size;
@@ -511,7 +574,8 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
if (rx_queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
- rxq = dev->data->rx_queues[rx_queue_id];
+ cpfl_rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = &cpfl_rxq->base;
if (rxq == NULL || !rxq->q_set) {
PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
@@ -575,9 +639,10 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_rx_queue *rxq =
- dev->data->rx_queues[rx_queue_id];
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq = dev->data->rx_queues[rx_queue_id];
+ struct idpf_rx_queue *rxq = &cpfl_rxq->base;
int err = 0;
err = idpf_vc_rxq_config(vport, rxq);
@@ -610,15 +675,15 @@ cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
if (tx_queue_id >= dev->data->nb_tx_queues)
return -EINVAL;
- txq = dev->data->tx_queues[tx_queue_id];
+ cpfl_txq = dev->data->tx_queues[tx_queue_id];
/* Init the RX tail register. */
- IDPF_PCI_REG_WRITE(txq->qtx_tail, 0);
+ IDPF_PCI_REG_WRITE(cpfl_txq->base.qtx_tail, 0);
return 0;
}
@@ -626,12 +691,13 @@ cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
int
cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_tx_queue *txq =
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_tx_queue *cpfl_txq =
dev->data->tx_queues[tx_queue_id];
int err = 0;
- err = idpf_vc_txq_config(vport, txq);
+ err = idpf_vc_txq_config(vport, &cpfl_txq->base);
if (err != 0) {
PMD_DRV_LOG(ERR, "Fail to configure Tx queue %u", tx_queue_id);
return err;
@@ -650,7 +716,7 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
tx_queue_id);
} else {
- txq->q_started = true;
+ cpfl_txq->base.q_started = true;
dev->data->tx_queue_state[tx_queue_id] =
RTE_ETH_QUEUE_STATE_STARTED;
}
@@ -661,13 +727,16 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
int
cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq;
struct idpf_rx_queue *rxq;
int err;
if (rx_queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
+ cpfl_rxq = dev->data->rx_queues[rx_queue_id];
err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
@@ -675,7 +744,7 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return err;
}
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = &cpfl_rxq->base;
rxq->q_started = false;
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
rxq->ops->release_mbufs(rxq);
@@ -693,13 +762,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_tx_queue *cpfl_txq;
struct idpf_tx_queue *txq;
int err;
if (tx_queue_id >= dev->data->nb_tx_queues)
return -EINVAL;
+ cpfl_txq = dev->data->tx_queues[tx_queue_id];
+
err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
@@ -707,7 +780,7 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
return err;
}
- txq = dev->data->tx_queues[tx_queue_id];
+ txq = &cpfl_txq->base;
txq->q_started = false;
txq->ops->release_mbufs(txq);
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
@@ -724,25 +797,25 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
void
cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- idpf_qc_rx_queue_release(dev->data->rx_queues[qid]);
+ cpfl_rx_queue_release(dev->data->rx_queues[qid]);
}
void
cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- idpf_qc_tx_queue_release(dev->data->tx_queues[qid]);
+ cpfl_tx_queue_release(dev->data->tx_queues[qid]);
}
void
cpfl_stop_queues(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
- struct idpf_tx_queue *txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
int i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- if (rxq == NULL)
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq == NULL)
continue;
if (cpfl_rx_queue_stop(dev, i) != 0)
@@ -750,8 +823,8 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL)
continue;
if (cpfl_tx_queue_stop(dev, i) != 0)
@@ -762,9 +835,10 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
void
cpfl_set_rx_function(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
#ifdef RTE_ARCH_X86
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i;
if (cpfl_rx_vec_dev_check_default(dev) == CPFL_VECTOR_PATH &&
@@ -790,8 +864,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- (void)idpf_qc_splitq_rx_vec_setup(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ (void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
if (vport->rx_use_avx512) {
@@ -810,8 +884,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
} else {
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- (void)idpf_qc_singleq_rx_vec_setup(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ (void)idpf_qc_singleq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
if (vport->rx_use_avx512) {
@@ -860,10 +934,11 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
void
cpfl_set_tx_function(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
#ifdef RTE_ARCH_X86
#ifdef CC_AVX512_SUPPORT
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
int i;
#endif /* CC_AVX512_SUPPORT */
@@ -878,8 +953,8 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
vport->tx_use_avx512 = true;
if (vport->tx_use_avx512) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- idpf_qc_tx_vec_avx512_setup(txq);
+ cpfl_txq = dev->data->tx_queues[i];
+ idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
}
}
}
@@ -916,10 +991,10 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
#ifdef CC_AVX512_SUPPORT
if (vport->tx_use_avx512) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL)
continue;
- idpf_qc_tx_vec_avx512_setup(txq);
+ idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
}
PMD_DRV_LOG(NOTICE,
"Using Single AVX512 Vector Tx (port %d).",
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index fb267d38c8..bfb9ad97bd 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -23,6 +23,14 @@
#define CPFL_SUPPORT_CHAIN_NUM 5
+struct cpfl_rx_queue {
+ struct idpf_rx_queue base;
+};
+
+struct cpfl_tx_queue {
+ struct idpf_tx_queue base;
+};
+
int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
index 665418d27d..5690b17911 100644
--- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
+++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
@@ -76,15 +76,16 @@ cpfl_rx_splitq_vec_default(struct idpf_rx_queue *rxq)
static inline int
cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_rx_queue *rxq;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq;
int i, default_ret, splitq_ret, ret = CPFL_SCALAR_PATH;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- default_ret = cpfl_rx_vec_queue_default(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
- splitq_ret = cpfl_rx_splitq_vec_default(rxq);
+ splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base);
ret = splitq_ret && default_ret;
} else {
ret = default_ret;
@@ -100,12 +101,12 @@ static inline int
cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)
{
int i;
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
int ret = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- ret = cpfl_tx_vec_queue_default(txq);
+ cpfl_txq = dev->data->tx_queues[i];
+ ret = cpfl_tx_vec_queue_default(&cpfl_txq->base);
if (ret == CPFL_SCALAR_PATH)
return CPFL_SCALAR_PATH;
}
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v5 02/13] common/idpf: support queue groups add/delete
2023-05-31 10:18 ` [PATCH v5 00/13] net/cpfl: add hairpin queue support beilei.xing
2023-05-31 10:18 ` [PATCH v5 01/13] net/cpfl: refine structures beilei.xing
@ 2023-05-31 10:18 ` beilei.xing
2023-05-31 10:18 ` [PATCH v5 03/13] net/cpfl: add haipin queue group during vport init beilei.xing
` (11 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:18 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds queue group add/delete virtual channel support.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 66 ++++++++++++++++++++++
drivers/common/idpf/idpf_common_virtchnl.h | 9 +++
drivers/common/idpf/version.map | 2 +
3 files changed, 77 insertions(+)
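For reference, a sketch of how a caller is expected to use the delete side of the new API, mirroring the cpfl usage later in this series; the function name here is illustrative, and the constants are the ones that series defines.

static int
example_p2p_queue_grps_del(struct idpf_vport *vport)
{
	struct virtchnl2_queue_group_id qg_ids[CPFL_P2P_NB_QUEUE_GRPS] = {0};
	int ret;

	qg_ids[0].queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
	qg_ids[0].queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;

	ret = idpf_vc_queue_grps_del(vport, CPFL_P2P_NB_QUEUE_GRPS, qg_ids);
	if (ret != 0)
		PMD_DRV_LOG(ERR, "Failed to delete p2p queue groups");

	return ret;
}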
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index b713678634..a3fe55c897 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -359,6 +359,72 @@ idpf_vc_vport_destroy(struct idpf_vport *vport)
return err;
}
+int
+idpf_vc_queue_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
+ uint8_t *p2p_queue_grps_out)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_cmd_info args;
+ int size, qg_info_size;
+ int err = -1;
+
+ size = sizeof(*p2p_queue_grps_info) +
+ (p2p_queue_grps_info->qg_info.num_queue_groups - 1) *
+ sizeof(struct virtchnl2_queue_group_info);
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_ADD_QUEUE_GROUPS;
+ args.in_args = (uint8_t *)p2p_queue_grps_info;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ if (err != 0) {
+ DRV_LOG(ERR,
+ "Failed to execute command of VIRTCHNL2_OP_ADD_QUEUE_GROUPS");
+ return err;
+ }
+
+ rte_memcpy(p2p_queue_grps_out, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
+ return 0;
+}
+
+int idpf_vc_queue_grps_del(struct idpf_vport *vport,
+ uint16_t num_q_grps,
+ struct virtchnl2_queue_group_id *qg_ids)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_delete_queue_groups *vc_del_q_grps;
+ struct idpf_cmd_info args;
+ int size;
+ int err;
+
+ size = sizeof(*vc_del_q_grps) +
+ (num_q_grps - 1) * sizeof(struct virtchnl2_queue_group_id);
+ vc_del_q_grps = rte_zmalloc("vc_del_q_grps", size, 0);
+
+ vc_del_q_grps->vport_id = vport->vport_id;
+ vc_del_q_grps->num_queue_groups = num_q_grps;
+ memcpy(vc_del_q_grps->qg_ids, qg_ids,
+ num_q_grps * sizeof(struct virtchnl2_queue_group_id));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_DEL_QUEUE_GROUPS;
+ args.in_args = (uint8_t *)vc_del_q_grps;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_DEL_QUEUE_GROUPS");
+
+ rte_free(vc_del_q_grps);
+ return err;
+}
+
int
idpf_vc_rss_key_set(struct idpf_vport *vport)
{
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index c45295290e..58b16e1c5d 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -64,4 +64,13 @@ int idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
__rte_internal
int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 *buff_count, struct idpf_dma_mem **buffs);
+__rte_internal
+int idpf_vc_queue_grps_del(struct idpf_vport *vport,
+ uint16_t num_q_grps,
+ struct virtchnl2_queue_group_id *qg_ids);
+__rte_internal
+int
+idpf_vc_queue_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
+ uint8_t *p2p_queue_grps_out);
#endif /* _IDPF_COMMON_VIRTCHNL_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 70334a1b03..01d18f3f3f 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -43,6 +43,8 @@ INTERNAL {
idpf_vc_irq_map_unmap_config;
idpf_vc_one_msg_read;
idpf_vc_ptype_info_query;
+ idpf_vc_queue_grps_add;
+ idpf_vc_queue_grps_del;
idpf_vc_queue_switch;
idpf_vc_queues_ena_dis;
idpf_vc_rss_hash_get;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v5 03/13] net/cpfl: add hairpin queue group during vport init
2023-05-31 10:18 ` [PATCH v5 00/13] net/cpfl: add hairpin queue support beilei.xing
2023-05-31 10:18 ` [PATCH v5 01/13] net/cpfl: refine structures beilei.xing
2023-05-31 10:18 ` [PATCH v5 02/13] common/idpf: support queue groups add/delete beilei.xing
@ 2023-05-31 10:18 ` beilei.xing
2023-05-31 10:18 ` [PATCH v5 04/13] net/cpfl: support hairpin queue capability get beilei.xing
` (10 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:18 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds a hairpin queue group during vport init.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 133 +++++++++++++++++++++++++++++++++
drivers/net/cpfl/cpfl_ethdev.h | 18 +++++
drivers/net/cpfl/cpfl_rxtx.h | 7 ++
3 files changed, 158 insertions(+)
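To make the stored chunk bookkeeping concrete, a hedged worked example (the numeric values are invented; the qid/qtail helpers referenced at the end are added later in this series):
	/* Suppose the add-group response carried, for the p2p Tx chunk:
	 *   start_queue_id    = 1024
	 *   qtail_reg_start   = 0x8000
	 *   qtail_reg_spacing = 0x1000
	 * Then logical p2p Tx queue n maps to
	 *   absolute queue id    = 1024 + n
	 *   tail register offset = 0x8000 + n * 0x1000
	 * which is exactly how the stored fields are later combined:
	 *   qid  = p2p_q_chunks_info->tx_start_qid + n;
	 *   tail = p2p_q_chunks_info->tx_qtail_start +
	 *          n * p2p_q_chunks_info->tx_qtail_spacing;
	 */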
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index e587155db6..c1273a7478 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -840,6 +840,20 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
return 0;
}
+static int
+cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
+{
+ struct virtchnl2_queue_group_id qg_ids[CPFL_P2P_NB_QUEUE_GRPS] = {0};
+ int ret = 0;
+
+ qg_ids[0].queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
+ qg_ids[0].queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
+ ret = idpf_vc_queue_grps_del(vport, CPFL_P2P_NB_QUEUE_GRPS, qg_ids);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to delete p2p queue groups");
+ return ret;
+}
+
static int
cpfl_dev_close(struct rte_eth_dev *dev)
{
@@ -848,7 +862,12 @@ cpfl_dev_close(struct rte_eth_dev *dev)
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
+
+ if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
+ cpfl_p2p_queue_grps_del(vport);
+
idpf_vport_deinit(vport);
+ rte_free(cpfl_vport->p2p_q_chunks_info);
adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
adapter->cur_vport_nb--;
@@ -1284,6 +1303,96 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter)
return vport_idx;
}
+static int
+cpfl_p2p_q_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
+ uint8_t *p2p_q_vc_out_info)
+{
+ int ret;
+
+ p2p_queue_grps_info->vport_id = vport->vport_id;
+ p2p_queue_grps_info->qg_info.num_queue_groups = CPFL_P2P_NB_QUEUE_GRPS;
+ p2p_queue_grps_info->qg_info.groups[0].num_rx_q = CPFL_MAX_P2P_NB_QUEUES;
+ p2p_queue_grps_info->qg_info.groups[0].num_rx_bufq = CPFL_P2P_NB_RX_BUFQ;
+ p2p_queue_grps_info->qg_info.groups[0].num_tx_q = CPFL_MAX_P2P_NB_QUEUES;
+ p2p_queue_grps_info->qg_info.groups[0].num_tx_complq = CPFL_P2P_NB_TX_COMPLQ;
+ p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
+ p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
+ p2p_queue_grps_info->qg_info.groups[0].rx_q_grp_info.rss_lut_size = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.tx_tc = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.priority = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.is_sp = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.pir_weight = 0;
+
+ ret = idpf_vc_queue_grps_add(vport, p2p_queue_grps_info, p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to add p2p queue groups.");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
+ struct virtchnl2_add_queue_groups *p2p_q_vc_out_info)
+{
+ struct p2p_queue_chunks_info *p2p_q_chunks_info = cpfl_vport->p2p_q_chunks_info;
+ struct virtchnl2_queue_reg_chunks *vc_chunks_out;
+ int i, type;
+
+ if (p2p_q_vc_out_info->qg_info.groups[0].qg_id.queue_group_type !=
+ VIRTCHNL2_QUEUE_GROUP_P2P) {
+ PMD_DRV_LOG(ERR, "Add queue group response mismatch.");
+ return -EINVAL;
+ }
+
+ vc_chunks_out = &p2p_q_vc_out_info->qg_info.groups[0].chunks;
+
+ for (i = 0; i < vc_chunks_out->num_chunks; i++) {
+ type = vc_chunks_out->chunks[i].type;
+ switch (type) {
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ p2p_q_chunks_info->tx_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->tx_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->tx_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ p2p_q_chunks_info->rx_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->rx_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->rx_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ p2p_q_chunks_info->tx_compl_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->tx_compl_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->tx_compl_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ p2p_q_chunks_info->rx_buf_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->rx_buf_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->rx_buf_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported queue type");
+ break;
+ }
+ }
+
+ return 0;
+}
+
static int
cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
{
@@ -1293,6 +1402,8 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
struct cpfl_adapter_ext *adapter = param->adapter;
/* for sending create vport virtchnl msg prepare */
struct virtchnl2_create_vport create_vport_info;
+ struct virtchnl2_add_queue_groups p2p_queue_grps_info;
+ uint8_t p2p_q_vc_out_info[IDPF_DFLT_MBX_BUF_SIZE] = {0};
int ret = 0;
dev->dev_ops = &cpfl_eth_dev_ops;
@@ -1327,6 +1438,28 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
&dev->data->mac_addrs[0]);
+ if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
+ memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
+ ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to add p2p queue group.");
+ return 0;
+ }
+ cpfl_vport->p2p_q_chunks_info = rte_zmalloc(NULL,
+ sizeof(struct p2p_queue_chunks_info), 0);
+ if (cpfl_vport->p2p_q_chunks_info == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate p2p queue info.");
+ cpfl_p2p_queue_grps_del(vport);
+ return 0;
+ }
+ ret = cpfl_p2p_queue_info_init(cpfl_vport,
+ (struct virtchnl2_add_queue_groups *)p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to init p2p queue info.");
+ cpfl_p2p_queue_grps_del(vport);
+ }
+ }
+
return 0;
err_mac_addrs:
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 81fe9ac4c3..666d46a44a 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -56,6 +56,7 @@
/* Device IDs */
#define IDPF_DEV_ID_CPF 0x1453
+#define VIRTCHNL2_QUEUE_GROUP_P2P 0x100
struct cpfl_vport_param {
struct cpfl_adapter_ext *adapter;
@@ -69,8 +70,25 @@ struct cpfl_devargs {
uint16_t req_vport_nb;
};
+struct p2p_queue_chunks_info {
+ uint32_t tx_start_qid;
+ uint32_t rx_start_qid;
+ uint32_t tx_compl_start_qid;
+ uint32_t rx_buf_start_qid;
+
+ uint64_t tx_qtail_start;
+ uint32_t tx_qtail_spacing;
+ uint64_t rx_qtail_start;
+ uint32_t rx_qtail_spacing;
+ uint64_t tx_compl_qtail_start;
+ uint32_t tx_compl_qtail_spacing;
+ uint64_t rx_buf_qtail_start;
+ uint32_t rx_buf_qtail_spacing;
+};
+
struct cpfl_vport {
struct idpf_vport base;
+ struct p2p_queue_chunks_info *p2p_q_chunks_info;
};
struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index bfb9ad97bd..1fe65778f0 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -13,6 +13,13 @@
#define CPFL_MIN_RING_DESC 32
#define CPFL_MAX_RING_DESC 4096
#define CPFL_DMA_MEM_ALIGN 4096
+
+#define CPFL_MAX_P2P_NB_QUEUES 16
+#define CPFL_P2P_NB_RX_BUFQ 1
+#define CPFL_P2P_NB_TX_COMPLQ 1
+#define CPFL_P2P_NB_QUEUE_GRPS 1
+#define CPFL_P2P_QUEUE_GRP_ID 1
+
/* Base address of the HW descriptor ring should be 128B aligned. */
#define CPFL_RING_BASE_ALIGN 128
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v5 04/13] net/cpfl: support hairpin queue capability get
2023-05-31 10:18 ` [PATCH v5 00/13] net/cpfl: add hairpin queue support beilei.xing
` (2 preceding siblings ...)
2023-05-31 10:18 ` [PATCH v5 03/13] net/cpfl: add hairpin queue group during vport init beilei.xing
@ 2023-05-31 10:18 ` beilei.xing
2023-05-31 10:18 ` [PATCH v5 05/13] net/cpfl: support hairpin queue setup and release beilei.xing
` (9 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:18 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch adds hairpin_cap_get ops support.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 18 ++++++++++++++++++
drivers/net/cpfl/cpfl_rxtx.h | 3 +++
2 files changed, 21 insertions(+)
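Once the hairpin_cap_get ops below is hooked up, an application can query the limits through the standard ethdev API; a small usage sketch (port_id is assumed to be an initialized cpfl port):
#include <stdio.h>
#include <rte_ethdev.h>

static int
example_query_hairpin_caps(uint16_t port_id)
{
	struct rte_eth_hairpin_cap cap;
	int ret;

	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
	if (ret != 0)
		return ret;	/* -ENOTSUP when the vport has no p2p queue group */

	printf("hairpin: max queues %u, max desc %u, rx2tx %u, tx2rx %u\n",
	       cap.max_nb_queues, cap.max_nb_desc,
	       cap.max_rx_2_tx, cap.max_tx_2_rx);
	return 0;
}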
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index c1273a7478..40b4515539 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -154,6 +154,23 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
return rte_eth_linkstatus_set(dev, &new_link);
}
+static int
+cpfl_hairpin_cap_get(struct rte_eth_dev *dev,
+ struct rte_eth_hairpin_cap *cap)
+{
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+
+ if (cpfl_vport->p2p_q_chunks_info == NULL)
+ return -ENOTSUP;
+
+ cap->max_nb_queues = CPFL_MAX_P2P_NB_QUEUES;
+ cap->max_rx_2_tx = CPFL_MAX_HAIRPINQ_RX_2_TX;
+ cap->max_tx_2_rx = CPFL_MAX_HAIRPINQ_TX_2_RX;
+ cap->max_nb_desc = CPFL_MAX_HAIRPINQ_NB_DESC;
+
+ return 0;
+}
+
static int
cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
@@ -904,6 +921,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.xstats_get = cpfl_dev_xstats_get,
.xstats_get_names = cpfl_dev_xstats_get_names,
.xstats_reset = cpfl_dev_xstats_reset,
+ .hairpin_cap_get = cpfl_hairpin_cap_get,
};
static int
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 1fe65778f0..a4a164d462 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -14,6 +14,9 @@
#define CPFL_MAX_RING_DESC 4096
#define CPFL_DMA_MEM_ALIGN 4096
+#define CPFL_MAX_HAIRPINQ_RX_2_TX 1
+#define CPFL_MAX_HAIRPINQ_TX_2_RX 1
+#define CPFL_MAX_HAIRPINQ_NB_DESC 1024
#define CPFL_MAX_P2P_NB_QUEUES 16
#define CPFL_P2P_NB_RX_BUFQ 1
#define CPFL_P2P_NB_TX_COMPLQ 1
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v5 05/13] net/cpfl: support hairpin queue setup and release
2023-05-31 10:18 ` [PATCH v5 00/13] net/cpfl: add hairpin queue support beilei.xing
` (3 preceding siblings ...)
2023-05-31 10:18 ` [PATCH v5 04/13] net/cpfl: support hairpin queue capability get beilei.xing
@ 2023-05-31 10:18 ` beilei.xing
2023-05-31 10:18 ` [PATCH v5 06/13] common/idpf: add queue config API beilei.xing
` (8 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:18 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
Support hairpin Rx/Tx queue setup and release.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 6 +
drivers/net/cpfl/cpfl_ethdev.h | 11 +
drivers/net/cpfl/cpfl_rxtx.c | 364 +++++++++++++++++++++++-
drivers/net/cpfl/cpfl_rxtx.h | 36 +++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 4 +
5 files changed, 420 insertions(+), 1 deletion(-)
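For context, a hedged application-side sketch of how these setup ops are reached through the ethdev hairpin API; the port and queue indices are assumptions (queue index 0 is taken to be a normal data queue, the hairpin pair lives at index 1 of the same port):
#include <rte_ethdev.h>

static int
example_setup_hairpin_pair(uint16_t port_id, uint16_t hp_qid, uint16_t nb_desc)
{
	struct rte_eth_hairpin_conf hp_conf = {
		.peer_count = 1,
		.manual_bind = 0,	/* let dev_start bind the queues */
		.tx_explicit = 0,
	};
	int ret;

	hp_conf.peers[0].port = port_id;
	hp_conf.peers[0].queue = hp_qid;	/* peer Tx queue index */
	ret = rte_eth_rx_hairpin_queue_setup(port_id, hp_qid, nb_desc, &hp_conf);
	if (ret != 0)
		return ret;

	hp_conf.peers[0].queue = hp_qid;	/* peer Rx queue index */
	return rte_eth_tx_hairpin_queue_setup(port_id, hp_qid, nb_desc, &hp_conf);
}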
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 40b4515539..b17c538ec2 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -879,6 +879,10 @@ cpfl_dev_close(struct rte_eth_dev *dev)
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
+ if (cpfl_vport->p2p_mp) {
+ rte_mempool_free(cpfl_vport->p2p_mp);
+ cpfl_vport->p2p_mp = NULL;
+ }
if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
cpfl_p2p_queue_grps_del(vport);
@@ -922,6 +926,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.xstats_get_names = cpfl_dev_xstats_get_names,
.xstats_reset = cpfl_dev_xstats_reset,
.hairpin_cap_get = cpfl_hairpin_cap_get,
+ .rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
+ .tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
};
static int
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 666d46a44a..2e42354f70 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -89,6 +89,17 @@ struct p2p_queue_chunks_info {
struct cpfl_vport {
struct idpf_vport base;
struct p2p_queue_chunks_info *p2p_q_chunks_info;
+
+ struct rte_mempool *p2p_mp;
+
+ uint16_t nb_data_rxq;
+ uint16_t nb_data_txq;
+ uint16_t nb_p2p_rxq;
+ uint16_t nb_p2p_txq;
+
+ struct idpf_rx_queue *p2p_rx_bufq;
+ struct idpf_tx_queue *p2p_tx_complq;
+ bool p2p_manual_bind;
};
struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 04a51b8d15..90b408d1f4 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -10,6 +10,67 @@
#include "cpfl_rxtx.h"
#include "cpfl_rxtx_vec_common.h"
+static inline void
+cpfl_tx_hairpin_descq_reset(struct idpf_tx_queue *txq)
+{
+ uint32_t i, size;
+
+ if (!txq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+ return;
+ }
+
+ size = txq->nb_tx_desc * CPFL_P2P_DESC_LEN;
+ for (i = 0; i < size; i++)
+ ((volatile char *)txq->desc_ring)[i] = 0;
+}
+
+static inline void
+cpfl_tx_hairpin_complq_reset(struct idpf_tx_queue *cq)
+{
+ uint32_t i, size;
+
+ if (!cq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to complq is NULL");
+ return;
+ }
+
+ size = cq->nb_tx_desc * CPFL_P2P_DESC_LEN;
+ for (i = 0; i < size; i++)
+ ((volatile char *)cq->compl_ring)[i] = 0;
+}
+
+static inline void
+cpfl_rx_hairpin_descq_reset(struct idpf_rx_queue *rxq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxq)
+ return;
+
+ len = rxq->nb_rx_desc;
+ for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
+ ((volatile char *)rxq->rx_ring)[i] = 0;
+}
+
+static inline void
+cpfl_rx_hairpin_bufq_reset(struct idpf_rx_queue *rxbq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxbq)
+ return;
+
+ len = rxbq->nb_rx_desc;
+ for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
+ ((volatile char *)rxbq->rx_ring)[i] = 0;
+
+ rxbq->bufq1 = NULL;
+ rxbq->bufq2 = NULL;
+}
+
static uint64_t
cpfl_rx_offload_convert(uint64_t offload)
{
@@ -234,7 +295,10 @@ cpfl_rx_queue_release(void *rxq)
/* Split queue */
if (!q->adapter->is_rx_singleq) {
- if (q->bufq2)
+ /* The mz is shared between the Tx/Rx hairpin queues; the Rx release
+ * path frees the buffers, q->bufq1->mz and q->mz.
+ */
+ if (!cpfl_rxq->hairpin_info.hairpin_q && q->bufq2)
cpfl_rx_split_bufq_release(q->bufq2);
if (q->bufq1)
@@ -385,6 +449,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
}
+ cpfl_vport->nb_data_rxq++;
rxq->q_set = true;
dev->data->rx_queues[queue_idx] = cpfl_rxq;
@@ -548,6 +613,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
queue_idx * vport->chunks_info.tx_qtail_spacing);
txq->ops = &def_txq_ops;
+ cpfl_vport->nb_data_txq++;
txq->q_set = true;
dev->data->tx_queues[queue_idx] = cpfl_txq;
@@ -562,6 +628,300 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return ret;
}
+static int
+cpfl_rx_hairpin_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,
+ uint16_t logic_qid, uint16_t nb_desc)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct rte_mempool *mp;
+ char pool_name[RTE_MEMPOOL_NAMESIZE];
+
+ mp = cpfl_vport->p2p_mp;
+ if (!mp) {
+ snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "p2p_mb_pool_%u",
+ dev->data->port_id);
+ mp = rte_pktmbuf_pool_create(pool_name, CPFL_P2P_NB_MBUF * CPFL_MAX_P2P_NB_QUEUES,
+ CPFL_P2P_CACHE_SIZE, 0, CPFL_P2P_MBUF_SIZE,
+ dev->device->numa_node);
+ if (!mp) {
+ PMD_INIT_LOG(ERR, "Failed to allocate mbuf pool for p2p");
+ return -ENOMEM;
+ }
+ cpfl_vport->p2p_mp = mp;
+ }
+
+ bufq->mp = mp;
+ bufq->nb_rx_desc = nb_desc;
+ bufq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_buf_start_qid,
+ logic_qid);
+ bufq->port_id = dev->data->port_id;
+ bufq->adapter = adapter;
+ bufq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
+
+ bufq->q_set = true;
+ bufq->ops = &def_rxq_ops;
+
+ return 0;
+}
+
+int
+cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf)
+{
+ struct cpfl_vport *cpfl_vport = (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter_base = vport->adapter;
+ uint16_t logic_qid = cpfl_vport->nb_p2p_rxq;
+ struct cpfl_rxq_hairpin_info *hairpin_info;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct idpf_rx_queue *bufq1 = NULL;
+ struct idpf_rx_queue *rxq;
+ uint16_t peer_port, peer_q;
+ uint16_t qid;
+ int ret;
+
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin queue.");
+ return -EINVAL;
+ }
+
+ if (conf->peer_count != 1) {
+ PMD_INIT_LOG(ERR, "Can't support Rx hairpin queue peer count %d", conf->peer_count);
+ return -EINVAL;
+ }
+
+ peer_port = conf->peers[0].port;
+ peer_q = conf->peers[0].queue;
+
+ if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
+ nb_desc > CPFL_MAX_RING_DESC ||
+ nb_desc < CPFL_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is invalid", nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory if needed */
+ if (dev->data->rx_queues[queue_idx]) {
+ cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* Setup Rx description queue */
+ cpfl_rxq = rte_zmalloc_socket("cpfl hairpin rxq",
+ sizeof(struct cpfl_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!cpfl_rxq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
+ return -ENOMEM;
+ }
+
+ rxq = &cpfl_rxq->base;
+ hairpin_info = &cpfl_rxq->hairpin_info;
+ rxq->nb_rx_desc = nb_desc * 2;
+ rxq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid, logic_qid);
+ rxq->port_id = dev->data->port_id;
+ rxq->adapter = adapter_base;
+ rxq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
+ hairpin_info->hairpin_q = true;
+ hairpin_info->peer_txp = peer_port;
+ hairpin_info->peer_txq_id = peer_q;
+
+ if (conf->manual_bind != 0)
+ cpfl_vport->p2p_manual_bind = true;
+ else
+ cpfl_vport->p2p_manual_bind = false;
+
+ if (cpfl_vport->p2p_rx_bufq == NULL) {
+ bufq1 = rte_zmalloc_socket("hairpin rx bufq1",
+ sizeof(struct idpf_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!bufq1) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for hairpin Rx buffer queue 1.");
+ ret = -ENOMEM;
+ goto err_alloc_bufq1;
+ }
+ qid = 2 * logic_qid;
+ ret = cpfl_rx_hairpin_bufq_setup(dev, bufq1, qid, nb_desc);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to setup hairpin Rx buffer queue 1");
+ ret = -EINVAL;
+ goto err_setup_bufq1;
+ }
+ cpfl_vport->p2p_rx_bufq = bufq1;
+ }
+
+ rxq->bufq1 = cpfl_vport->p2p_rx_bufq;
+ rxq->bufq2 = NULL;
+
+ cpfl_vport->nb_p2p_rxq++;
+ rxq->q_set = true;
+ dev->data->rx_queues[queue_idx] = cpfl_rxq;
+
+ return 0;
+
+err_setup_bufq1:
+ rte_mempool_free(cpfl_vport->p2p_mp);
+ rte_free(bufq1);
+err_alloc_bufq1:
+ rte_free(cpfl_rxq);
+
+ return ret;
+}
+
+int
+cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter_base = vport->adapter;
+ uint16_t logic_qid = cpfl_vport->nb_p2p_txq;
+ struct cpfl_txq_hairpin_info *hairpin_info;
+ struct idpf_hw *hw = &adapter_base->hw;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct idpf_tx_queue *txq, *cq;
+ const struct rte_memzone *mz;
+ uint32_t ring_size;
+ uint16_t peer_port, peer_q;
+ int ret;
+
+ if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin queue.");
+ return -EINVAL;
+ }
+
+ if (conf->peer_count != 1) {
+ PMD_INIT_LOG(ERR, "Can't support Tx hairpin queue peer count %d", conf->peer_count);
+ return -EINVAL;
+ }
+
+ peer_port = conf->peers[0].port;
+ peer_q = conf->peers[0].queue;
+
+ if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
+ nb_desc > CPFL_MAX_RING_DESC ||
+ nb_desc < CPFL_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is invalid",
+ nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory if needed. */
+ if (dev->data->tx_queues[queue_idx]) {
+ cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the TX queue data structure. */
+ cpfl_txq = rte_zmalloc_socket("cpfl hairpin txq",
+ sizeof(struct cpfl_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!cpfl_txq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+ return -ENOMEM;
+ }
+
+ txq = &cpfl_txq->base;
+ hairpin_info = &cpfl_txq->hairpin_info;
+ /* Txq ring length should be twice the Tx completion queue size. */
+ txq->nb_tx_desc = nb_desc * 2;
+ txq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_start_qid, logic_qid);
+ txq->port_id = dev->data->port_id;
+ hairpin_info->hairpin_q = true;
+ hairpin_info->peer_rxp = peer_port;
+ hairpin_info->peer_rxq_id = peer_q;
+
+ if (conf->manual_bind != 0)
+ cpfl_vport->p2p_manual_bind = true;
+ else
+ cpfl_vport->p2p_manual_bind = false;
+
+ /* The Tx hairpin queue always allocates the Tx HW ring. */
+ ring_size = RTE_ALIGN(txq->nb_tx_desc * CPFL_P2P_DESC_LEN,
+ CPFL_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_ring", logic_qid,
+ ring_size + CPFL_P2P_RING_BUF,
+ CPFL_RING_BASE_ALIGN,
+ dev->device->numa_node);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
+ ret = -ENOMEM;
+ goto err_txq_mz_rsv;
+ }
+
+ txq->tx_ring_phys_addr = mz->iova;
+ txq->desc_ring = mz->addr;
+ txq->mz = mz;
+
+ cpfl_tx_hairpin_descq_reset(txq);
+ txq->qtx_tail = hw->hw_addr +
+ cpfl_hw_qtail_get(cpfl_vport->p2p_q_chunks_info->tx_qtail_start,
+ logic_qid, cpfl_vport->p2p_q_chunks_info->tx_qtail_spacing);
+ txq->ops = &def_txq_ops;
+
+ if (cpfl_vport->p2p_tx_complq == NULL) {
+ cq = rte_zmalloc_socket("cpfl hairpin cq",
+ sizeof(struct idpf_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ dev->device->numa_node);
+ if (!cq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+ ret = -ENOMEM;
+ goto err_cq_alloc;
+ }
+
+ cq->nb_tx_desc = nb_desc;
+ cq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_compl_start_qid,
+ 0);
+ cq->port_id = dev->data->port_id;
+
+ /* Tx completion queue always allocates the HW ring */
+ ring_size = RTE_ALIGN(cq->nb_tx_desc * CPFL_P2P_DESC_LEN,
+ CPFL_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_compl_ring", logic_qid,
+ ring_size + CPFL_P2P_RING_BUF,
+ CPFL_RING_BASE_ALIGN,
+ dev->device->numa_node);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX completion queue");
+ ret = -ENOMEM;
+ goto err_cq_mz_rsv;
+ }
+ cq->tx_ring_phys_addr = mz->iova;
+ cq->compl_ring = mz->addr;
+ cq->mz = mz;
+
+ cpfl_tx_hairpin_complq_reset(cq);
+ cpfl_vport->p2p_tx_complq = cq;
+ }
+
+ txq->complq = cpfl_vport->p2p_tx_complq;
+
+ cpfl_vport->nb_p2p_txq++;
+ txq->q_set = true;
+ dev->data->tx_queues[queue_idx] = cpfl_txq;
+
+ return 0;
+
+err_cq_mz_rsv:
+ rte_free(cq);
+err_cq_alloc:
+ cpfl_dma_zone_release(mz);
+err_txq_mz_rsv:
+ rte_free(cpfl_txq);
+ return ret;
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
@@ -865,6 +1225,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ continue;
(void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index a4a164d462..06198d4aad 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -22,6 +22,11 @@
#define CPFL_P2P_NB_TX_COMPLQ 1
#define CPFL_P2P_NB_QUEUE_GRPS 1
#define CPFL_P2P_QUEUE_GRP_ID 1
+#define CPFL_P2P_DESC_LEN 16
+#define CPFL_P2P_NB_MBUF 4096
+#define CPFL_P2P_CACHE_SIZE 250
+#define CPFL_P2P_MBUF_SIZE 2048
+#define CPFL_P2P_RING_BUF 128
/* Base address of the HW descriptor ring should be 128B aligned. */
#define CPFL_RING_BASE_ALIGN 128
@@ -33,14 +38,40 @@
#define CPFL_SUPPORT_CHAIN_NUM 5
+struct cpfl_rxq_hairpin_info {
+ bool hairpin_q; /* if rx queue is a hairpin queue */
+ uint16_t peer_txp;
+ uint16_t peer_txq_id;
+};
+
struct cpfl_rx_queue {
struct idpf_rx_queue base;
+ struct cpfl_rxq_hairpin_info hairpin_info;
+};
+
+struct cpfl_txq_hairpin_info {
+ bool hairpin_q; /* if tx queue is a hairpin queue */
+ uint16_t peer_rxp;
+ uint16_t peer_rxq_id;
};
struct cpfl_tx_queue {
struct idpf_tx_queue base;
+ struct cpfl_txq_hairpin_info hairpin_info;
};
+static inline uint16_t
+cpfl_hw_qid_get(uint16_t start_qid, uint16_t offset)
+{
+ return start_qid + offset;
+}
+
+static inline uint64_t
+cpfl_hw_qtail_get(uint64_t tail_start, uint16_t offset, uint64_t tail_spacing)
+{
+ return tail_start + offset * tail_spacing;
+}
+
int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
@@ -59,4 +90,9 @@ void cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void cpfl_set_rx_function(struct rte_eth_dev *dev);
void cpfl_set_tx_function(struct rte_eth_dev *dev);
+int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, const struct rte_eth_hairpin_conf *conf);
+int cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf);
#endif /* _CPFL_RXTX_H_ */
diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
index 5690b17911..d8e9191196 100644
--- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
+++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
@@ -85,6 +85,8 @@ cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)
cpfl_rxq = dev->data->rx_queues[i];
default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ continue;
splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base);
ret = splitq_ret && default_ret;
} else {
@@ -106,6 +108,8 @@ cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_tx_queues; i++) {
cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq->hairpin_info.hairpin_q)
+ continue;
ret = cpfl_tx_vec_queue_default(&cpfl_txq->base);
if (ret == CPFL_SCALAR_PATH)
return CPFL_SCALAR_PATH;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v5 06/13] common/idpf: add queue config API
2023-05-31 10:18 ` [PATCH v5 00/13] net/cpfl: add hairpin queue support beilei.xing
` (4 preceding siblings ...)
2023-05-31 10:18 ` [PATCH v5 05/13] net/cpfl: support hairpin queue setup and release beilei.xing
@ 2023-05-31 10:18 ` beilei.xing
2023-05-31 10:18 ` [PATCH v5 07/13] net/cpfl: support hairpin queue configuration beilei.xing
` (7 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:18 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds Rx/Tx queue configuration APIs (idpf_vc_rxq_config_by_info and idpf_vc_txq_config_by_info) that take caller-filled queue info arrays.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 70 ++++++++++++++++++++++
drivers/common/idpf/idpf_common_virtchnl.h | 6 ++
drivers/common/idpf/version.map | 2 +
3 files changed, 78 insertions(+)
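A hedged sketch of the new array-based API; it assumes the idpf common headers are included, and the queue ids and ring addresses are placeholders. Unlike idpf_vc_rxq_config(), the caller fills the virtchnl2_rxq_info entries itself and can configure several queues in one mailbox message:
static int
example_cfg_two_rxqs(struct idpf_vport *vport,
		     uint32_t qid0, uint64_t ring0_iova,
		     uint32_t qid1, uint64_t ring1_iova)
{
	struct virtchnl2_rxq_info rxq_info[2];

	memset(rxq_info, 0, sizeof(rxq_info));
	rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX;
	rxq_info[0].queue_id = qid0;
	rxq_info[0].ring_len = 1024;
	rxq_info[0].dma_ring_addr = ring0_iova;
	rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;

	rxq_info[1] = rxq_info[0];
	rxq_info[1].queue_id = qid1;
	rxq_info[1].dma_ring_addr = ring1_iova;

	/* One virtchnl message configures both queues. */
	return idpf_vc_rxq_config_by_info(vport, rxq_info, 2);
}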
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index a3fe55c897..211b44a88e 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -1050,6 +1050,41 @@ idpf_vc_rxq_config(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
return err;
}
+int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct virtchnl2_rxq_info *rxq_info,
+ uint16_t num_qs)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+ struct idpf_cmd_info args;
+ int size, err, i;
+
+ size = sizeof(*vc_rxqs) + (num_qs - 1) *
+ sizeof(struct virtchnl2_rxq_info);
+ vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+ if (vc_rxqs == NULL) {
+ DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_rxqs->vport_id = vport->vport_id;
+ vc_rxqs->num_qinfo = num_qs;
+ memcpy(vc_rxqs->qinfo, rxq_info, num_qs * sizeof(struct virtchnl2_rxq_info));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+ args.in_args = (uint8_t *)vc_rxqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ rte_free(vc_rxqs);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+ return err;
+}
+
int
idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
{
@@ -1121,6 +1156,41 @@ idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
return err;
}
+int
+idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
+ uint16_t num_qs)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+ struct idpf_cmd_info args;
+ int size, err;
+
+ size = sizeof(*vc_txqs) + (num_qs - 1) * sizeof(struct virtchnl2_txq_info);
+ vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+ if (vc_txqs == NULL) {
+ DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_txqs->vport_id = vport->vport_id;
+ vc_txqs->num_qinfo = num_qs;
+ memcpy(vc_txqs->qinfo, txq_info, num_qs * sizeof(struct virtchnl2_txq_info));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+ args.in_args = (uint8_t *)vc_txqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ rte_free(vc_txqs);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+ return err;
+}
+
int
idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
struct idpf_ctlq_msg *q_msg)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index 58b16e1c5d..db83761a5e 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -65,6 +65,12 @@ __rte_internal
int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 *buff_count, struct idpf_dma_mem **buffs);
__rte_internal
+int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct virtchnl2_rxq_info *rxq_info,
+ uint16_t num_qs);
+__rte_internal
+int idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
+ uint16_t num_qs);
+__rte_internal
int idpf_vc_queue_grps_del(struct idpf_vport *vport,
uint16_t num_q_grps,
struct virtchnl2_queue_group_id *qg_ids);
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 01d18f3f3f..17e77884ce 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -54,8 +54,10 @@ INTERNAL {
idpf_vc_rss_lut_get;
idpf_vc_rss_lut_set;
idpf_vc_rxq_config;
+ idpf_vc_rxq_config_by_info;
idpf_vc_stats_query;
idpf_vc_txq_config;
+ idpf_vc_txq_config_by_info;
idpf_vc_vectors_alloc;
idpf_vc_vectors_dealloc;
idpf_vc_vport_create;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v5 07/13] net/cpfl: support hairpin queue configuration
2023-05-31 10:18 ` [PATCH v5 00/13] net/cpfl: add hairpin queue support beilei.xing
` (5 preceding siblings ...)
2023-05-31 10:18 ` [PATCH v5 06/13] common/idpf: add queue config API beilei.xing
@ 2023-05-31 10:18 ` beilei.xing
2023-05-31 10:18 ` [PATCH v5 08/13] common/idpf: add switch queue API beilei.xing
` (6 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:18 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports Rx/Tx hairpin queue configuration.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 136 +++++++++++++++++++++++++++++++--
drivers/net/cpfl/cpfl_rxtx.c | 80 +++++++++++++++++++
drivers/net/cpfl/cpfl_rxtx.h | 7 ++
3 files changed, 217 insertions(+), 6 deletions(-)
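To summarize the start order the patch enforces in cpfl_start_queues() for the non-manual-bind case (a descriptive outline of the diff below, not new driver code):
	/* 1. data Tx queues:    cpfl_tx_queue_start()
	 * 2. hairpin Tx queues: cpfl_hairpin_txq_config()
	 * 3. hairpin Tx complq: cpfl_hairpin_tx_complq_config()
	 * 4. hairpin Rx bufq:   cpfl_rxq_hairpin_mz_bind() + cpfl_hairpin_rx_bufq_config()
	 * 5. data Rx queues:    cpfl_rx_queue_start()
	 * 6. hairpin Rx queues: cpfl_hairpin_rxq_config() + cpfl_rx_queue_init()
	 */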
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index b17c538ec2..a06def06d0 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -742,33 +742,157 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
return idpf_vport_irq_map_config(vport, nb_rx_queues);
}
+/* Update hairpin_info for dev's tx hairpin queue */
+static int
+cpfl_txq_hairpin_info_update(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+ struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+ struct cpfl_txq_hairpin_info *hairpin_info;
+ struct cpfl_tx_queue *cpfl_txq;
+ int i;
+
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ hairpin_info = &cpfl_txq->hairpin_info;
+ if (hairpin_info->peer_rxp != rx_port) {
+ PMD_DRV_LOG(ERR, "port %d is not the peer port", rx_port);
+ return -EINVAL;
+ }
+ hairpin_info->peer_rxq_id =
+ cpfl_hw_qid_get(cpfl_rx_vport->p2p_q_chunks_info->rx_start_qid,
+ hairpin_info->peer_rxq_id - cpfl_rx_vport->nb_data_rxq);
+ }
+
+ return 0;
+}
+
+/* Bind Rx hairpin queue's memory zone to peer Tx hairpin queue's memory zone */
+static void
+cpfl_rxq_hairpin_mz_bind(struct rte_eth_dev *dev)
+{
+ struct cpfl_vport *cpfl_rx_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_rx_vport->base;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_hw *hw = &adapter->hw;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct rte_eth_dev *peer_dev;
+ const struct rte_memzone *mz;
+ uint16_t peer_tx_port;
+ uint16_t peer_tx_qid;
+ int i;
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ peer_tx_port = cpfl_rxq->hairpin_info.peer_txp;
+ peer_tx_qid = cpfl_rxq->hairpin_info.peer_txq_id;
+ peer_dev = &rte_eth_devices[peer_tx_port];
+ cpfl_txq = peer_dev->data->tx_queues[peer_tx_qid];
+
+ /* bind rx queue */
+ mz = cpfl_txq->base.mz;
+ cpfl_rxq->base.rx_ring_phys_addr = mz->iova;
+ cpfl_rxq->base.rx_ring = mz->addr;
+ cpfl_rxq->base.mz = mz;
+
+ /* bind rx buffer queue */
+ mz = cpfl_txq->base.complq->mz;
+ cpfl_rxq->base.bufq1->rx_ring_phys_addr = mz->iova;
+ cpfl_rxq->base.bufq1->rx_ring = mz->addr;
+ cpfl_rxq->base.bufq1->mz = mz;
+ cpfl_rxq->base.bufq1->qrx_tail = hw->hw_addr +
+ cpfl_hw_qtail_get(cpfl_rx_vport->p2p_q_chunks_info->rx_buf_qtail_start,
+ 0, cpfl_rx_vport->p2p_q_chunks_info->rx_buf_qtail_spacing);
+ }
+}
+
static int
cpfl_start_queues(struct rte_eth_dev *dev)
{
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_rx_queue *cpfl_rxq;
struct cpfl_tx_queue *cpfl_txq;
+ int update_flag = 0;
int err = 0;
int i;
+ /* For normal data queues, configure, init and enable Txq.
+ * For non-manual bind hairpin queues, configure Txq.
+ */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
cpfl_txq = dev->data->tx_queues[i];
if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
continue;
- err = cpfl_tx_queue_start(dev, i);
+ if (!cpfl_txq->hairpin_info.hairpin_q) {
+ err = cpfl_tx_queue_start(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+ return err;
+ }
+ } else if (!cpfl_vport->p2p_manual_bind) {
+ if (update_flag == 0) {
+ err = cpfl_txq_hairpin_info_update(dev,
+ cpfl_txq->hairpin_info.peer_rxp);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info");
+ return err;
+ }
+ update_flag = 1;
+ }
+ err = cpfl_hairpin_txq_config(vport, cpfl_txq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+ return err;
+ }
+ }
+ }
+
+ /* For non-manual bind hairpin queues, configure Tx completion queue first.*/
+ if (!cpfl_vport->p2p_manual_bind && cpfl_vport->p2p_tx_complq != NULL) {
+ err = cpfl_hairpin_tx_complq_config(cpfl_vport);
if (err != 0) {
- PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+ PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
return err;
}
}
+ /* For non-manual bind hairpin queues, configure Rx buffer queue.*/
+ if (!cpfl_vport->p2p_manual_bind && cpfl_vport->p2p_rx_bufq != NULL) {
+ cpfl_rxq_hairpin_mz_bind(dev);
+ err = cpfl_hairpin_rx_bufq_config(cpfl_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
+ return err;
+ }
+ }
+
+ /* For normal data queues, configure, init and enable Rxq.
+ * For non-manual bind hairpin queues, configure Rxq, and then init Rxq.
+ */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
continue;
- err = cpfl_rx_queue_start(dev, i);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
- return err;
+ if (!cpfl_rxq->hairpin_info.hairpin_q) {
+ err = cpfl_rx_queue_start(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
+ return err;
+ }
+ } else if (!cpfl_vport->p2p_manual_bind) {
+ err = cpfl_hairpin_rxq_config(vport, cpfl_rxq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
+ return err;
+ }
+ err = cpfl_rx_queue_init(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+ return err;
+ }
}
}
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 90b408d1f4..9408c6e1a4 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -922,6 +922,86 @@ cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return ret;
}
+int
+cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport)
+{
+ struct idpf_rx_queue *rx_bufq = cpfl_vport->p2p_rx_bufq;
+ struct virtchnl2_rxq_info rxq_info[1] = {0};
+
+ rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ rxq_info[0].queue_id = rx_bufq->queue_id;
+ rxq_info[0].ring_len = rx_bufq->nb_rx_desc;
+ rxq_info[0].dma_ring_addr = rx_bufq->rx_ring_phys_addr;
+ rxq_info[0].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info[0].rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
+ rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ rxq_info[0].data_buffer_size = rx_bufq->rx_buf_len;
+ rxq_info[0].buffer_notif_stride = CPFL_RX_BUF_STRIDE;
+
+ return idpf_vc_rxq_config_by_info(&cpfl_vport->base, rxq_info, 1);
+}
+
+int
+cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq)
+{
+ struct virtchnl2_rxq_info rxq_info[1] = {0};
+ struct idpf_rx_queue *rxq = &cpfl_rxq->base;
+
+ rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX;
+ rxq_info[0].queue_id = rxq->queue_id;
+ rxq_info[0].ring_len = rxq->nb_rx_desc;
+ rxq_info[0].dma_ring_addr = rxq->rx_ring_phys_addr;
+ rxq_info[0].rx_bufq1_id = rxq->bufq1->queue_id;
+ rxq_info[0].max_pkt_size = vport->max_pkt_len;
+ rxq_info[0].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info[0].qflags |= VIRTCHNL2_RX_DESC_SIZE_16BYTE;
+
+ rxq_info[0].data_buffer_size = rxq->rx_buf_len;
+ rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ rxq_info[0].rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
+
+ PMD_DRV_LOG(NOTICE, "hairpin: vport %u, Rxq id 0x%x",
+ vport->vport_id, rxq_info[0].queue_id);
+
+ return idpf_vc_rxq_config_by_info(vport, rxq_info, 1);
+}
+
+int
+cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport)
+{
+ struct idpf_tx_queue *tx_complq = cpfl_vport->p2p_tx_complq;
+ struct virtchnl2_txq_info txq_info[1] = {0};
+
+ txq_info[0].dma_ring_addr = tx_complq->tx_ring_phys_addr;
+ txq_info[0].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ txq_info[0].queue_id = tx_complq->queue_id;
+ txq_info[0].ring_len = tx_complq->nb_tx_desc;
+ txq_info[0].peer_rx_queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+ txq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info[0].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+
+ return idpf_vc_txq_config_by_info(&cpfl_vport->base, txq_info, 1);
+}
+
+int
+cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq)
+{
+ struct idpf_tx_queue *txq = &cpfl_txq->base;
+ struct virtchnl2_txq_info txq_info[1] = {0};
+
+ txq_info[0].dma_ring_addr = txq->tx_ring_phys_addr;
+ txq_info[0].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ txq_info[0].queue_id = txq->queue_id;
+ txq_info[0].ring_len = txq->nb_tx_desc;
+ txq_info[0].tx_compl_queue_id = txq->complq->queue_id;
+ txq_info[0].relative_queue_id = txq->queue_id;
+ txq_info[0].peer_rx_queue_id = cpfl_txq->hairpin_info.peer_rxq_id;
+ txq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info[0].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+
+ return idpf_vc_txq_config_by_info(vport, txq_info, 1);
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 06198d4aad..872ebc1bfd 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -32,12 +32,15 @@
#define CPFL_RING_BASE_ALIGN 128
#define CPFL_DEFAULT_RX_FREE_THRESH 32
+#define CPFL_RXBUF_LOW_WATERMARK 64
#define CPFL_DEFAULT_TX_RS_THRESH 32
#define CPFL_DEFAULT_TX_FREE_THRESH 32
#define CPFL_SUPPORT_CHAIN_NUM 5
+#define CPFL_RX_BUF_STRIDE 64
+
struct cpfl_rxq_hairpin_info {
bool hairpin_q; /* if rx queue is a hairpin queue */
uint16_t peer_txp;
@@ -95,4 +98,8 @@ int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
int cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc,
const struct rte_eth_hairpin_conf *conf);
+int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport);
+int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq);
+int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
+int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
#endif /* _CPFL_RXTX_H_ */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v5 08/13] common/idpf: add switch queue API
2023-05-31 10:18 ` [PATCH v5 00/13] net/cpfl: add hairpin queue support beilei.xing
` (6 preceding siblings ...)
2023-05-31 10:18 ` [PATCH v5 07/13] net/cpfl: support hairpin queue configuration beilei.xing
@ 2023-05-31 10:18 ` beilei.xing
2023-05-31 10:18 ` [PATCH v5 09/13] net/cpfl: support hairpin queue start/stop beilei.xing
` (5 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:18 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch exposes the previously static idpf_vc_ena_dis_one_queue() as an internal API.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 2 +-
drivers/common/idpf/idpf_common_virtchnl.h | 3 +++
drivers/common/idpf/version.map | 1 +
3 files changed, 5 insertions(+), 1 deletion(-)
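A minimal caller sketch of the now-exported helper (the queue id and error handling are placeholders); the next patch in this series uses it in this shape to switch individual hairpin queues:
static int
example_enable_rx_bufq(struct idpf_vport *vport, uint16_t bufq_id)
{
	int err;

	/* Enable one Rx buffer queue by its absolute queue id. */
	err = idpf_vc_ena_dis_one_queue(vport, bufq_id,
					VIRTCHNL2_QUEUE_TYPE_RX_BUFFER, true);
	if (err != 0)
		DRV_LOG(ERR, "Failed to enable Rx buffer queue %u", bufq_id);
	return err;
}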
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index 211b44a88e..6455f640da 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -733,7 +733,7 @@ idpf_vc_vectors_dealloc(struct idpf_vport *vport)
return err;
}
-static int
+int
idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
uint32_t type, bool on)
{
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index db83761a5e..9ff5c38c26 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -71,6 +71,9 @@ __rte_internal
int idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
uint16_t num_qs);
__rte_internal
+int idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
+ uint32_t type, bool on);
+__rte_internal
int idpf_vc_queue_grps_del(struct idpf_vport *vport,
uint16_t num_q_grps,
struct virtchnl2_queue_group_id *qg_ids);
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 17e77884ce..25624732b0 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -40,6 +40,7 @@ INTERNAL {
idpf_vc_cmd_execute;
idpf_vc_ctlq_post_rx_buffs;
idpf_vc_ctlq_recv;
+ idpf_vc_ena_dis_one_queue;
idpf_vc_irq_map_unmap_config;
idpf_vc_one_msg_read;
idpf_vc_ptype_info_query;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v5 09/13] net/cpfl: support hairpin queue start/stop
2023-05-31 10:18 ` [PATCH v5 00/13] net/cpfl: add hairpin queue support beilei.xing
` (7 preceding siblings ...)
2023-05-31 10:18 ` [PATCH v5 08/13] common/idpf: add switch queue API beilei.xing
@ 2023-05-31 10:18 ` beilei.xing
2023-05-31 10:18 ` [PATCH v5 10/13] common/idpf: add irq map config API beilei.xing
` (4 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:18 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports Rx/Tx hairpin queue start/stop.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 46 +++++++++
drivers/net/cpfl/cpfl_rxtx.c | 164 +++++++++++++++++++++++++++++----
drivers/net/cpfl/cpfl_rxtx.h | 15 +++
3 files changed, 207 insertions(+), 18 deletions(-)
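One detail worth spelling out before the diff (a hedged worked example with an assumed ring size): the buffer queue tail written at init must be a multiple of 8, which is why cpfl_alloc_split_p2p_rxq_mbufs() below backs the tail off by CPFL_HAIRPIN_Q_TAIL_AUX_VALUE:
	/* With an assumed nb_rx_desc of 1024, the initial tail written is
	 * 1024 - CPFL_HAIRPIN_Q_TAIL_AUX_VALUE = 1016, still a multiple of 8
	 * as the RX buffer queue tail register requires.
	 */
	rxq->rx_tail = rxq->nb_rx_desc - CPFL_HAIRPIN_Q_TAIL_AUX_VALUE;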
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index a06def06d0..2b99e58341 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -896,6 +896,52 @@ cpfl_start_queues(struct rte_eth_dev *dev)
}
}
+ /* For non-manual bind hairpin queues, enable Tx queue and Rx queue,
+ * then enable Tx completion queue and Rx buffer queue.
+ */
+ for (i = cpfl_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) {
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ i - cpfl_vport->nb_data_txq,
+ false, true);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on",
+ i);
+ else
+ cpfl_txq->base.q_started = true;
+ }
+ }
+
+ for (i = cpfl_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) {
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ i - cpfl_vport->nb_data_rxq,
+ true, true);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on",
+ i);
+ else
+ cpfl_rxq->base.q_started = true;
+ }
+ }
+
+ if (!cpfl_vport->p2p_manual_bind &&
+ cpfl_vport->p2p_tx_complq != NULL &&
+ cpfl_vport->p2p_rx_bufq != NULL) {
+ err = cpfl_switch_hairpin_complq(cpfl_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq");
+ return err;
+ }
+ err = cpfl_switch_hairpin_bufq(cpfl_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Rx bufq");
+ return err;
+ }
+ }
+
return err;
}
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 9408c6e1a4..8d1f8a560b 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -1002,6 +1002,89 @@ cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq
return idpf_vc_txq_config_by_info(vport, txq_info, 1);
}
+int
+cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ queue_id = cpfl_vport->p2p_tx_complq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+ return err;
+}
+
+int
+cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+ return err;
+}
+
+int
+cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t logic_qid,
+ bool rx, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = rx ? VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;
+
+ if (type == VIRTCHNL2_QUEUE_TYPE_RX)
+ queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid, logic_qid);
+ else
+ queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_start_qid, logic_qid);
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+ if (err)
+ return err;
+
+ return err;
+}
+
+static int
+cpfl_alloc_split_p2p_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+ volatile struct virtchnl2_p2p_rx_buf_desc *rxd;
+ struct rte_mbuf *mbuf = NULL;
+ uint64_t dma_addr;
+ uint16_t i;
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ mbuf = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!mbuf)) {
+ PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+ return -ENOMEM;
+ }
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+ rxd = &((volatile struct virtchnl2_p2p_rx_buf_desc *)(rxq->rx_ring))[i];
+ rxd->reserve0 = 0;
+ rxd->pkt_addr = dma_addr;
+ }
+
+ rxq->nb_rx_hold = 0;
+ /* The value written in the RX buffer queue tail register, must be a multiple of 8.*/
+ rxq->rx_tail = rxq->nb_rx_desc - CPFL_HAIRPIN_Q_TAIL_AUX_VALUE;
+
+ return 0;
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
@@ -1055,22 +1138,31 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
IDPF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
} else {
/* Split queue */
- err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
- return err;
- }
- err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
- return err;
+ if (cpfl_rxq->hairpin_info.hairpin_q) {
+ err = cpfl_alloc_split_p2p_rxq_mbufs(rxq->bufq1);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate p2p RX buffer queue mbuf");
+ return err;
+ }
+ } else {
+ err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+ return err;
+ }
+ err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+ return err;
+ }
}
rte_wmb();
/* Init the RX tail register. */
IDPF_PCI_REG_WRITE(rxq->bufq1->qrx_tail, rxq->bufq1->rx_tail);
- IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
+ if (rxq->bufq2)
+ IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
}
return err;
@@ -1177,7 +1269,12 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return -EINVAL;
cpfl_rxq = dev->data->rx_queues[rx_queue_id];
- err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ rx_queue_id - cpfl_vport->nb_data_txq,
+ true, false);
+ else
+ err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
rx_queue_id);
@@ -1191,10 +1288,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
idpf_qc_single_rx_queue_reset(rxq);
} else {
rxq->bufq1->ops->release_mbufs(rxq->bufq1);
- rxq->bufq2->ops->release_mbufs(rxq->bufq2);
- idpf_qc_split_rx_queue_reset(rxq);
+ if (rxq->bufq2)
+ rxq->bufq2->ops->release_mbufs(rxq->bufq2);
+ if (cpfl_rxq->hairpin_info.hairpin_q) {
+ cpfl_rx_hairpin_descq_reset(rxq);
+ cpfl_rx_hairpin_bufq_reset(rxq->bufq1);
+ } else {
+ idpf_qc_split_rx_queue_reset(rxq);
+ }
}
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ if (!cpfl_rxq->hairpin_info.hairpin_q)
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1213,7 +1317,12 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
cpfl_txq = dev->data->tx_queues[tx_queue_id];
- err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
+ if (cpfl_txq->hairpin_info.hairpin_q)
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ tx_queue_id - cpfl_vport->nb_data_txq,
+ false, false);
+ else
+ err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
tx_queue_id);
@@ -1226,10 +1335,17 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
idpf_qc_single_tx_queue_reset(txq);
} else {
- idpf_qc_split_tx_descq_reset(txq);
- idpf_qc_split_tx_complq_reset(txq->complq);
+ if (cpfl_txq->hairpin_info.hairpin_q) {
+ cpfl_tx_hairpin_descq_reset(txq);
+ cpfl_tx_hairpin_complq_reset(txq->complq);
+ } else {
+ idpf_qc_split_tx_descq_reset(txq);
+ idpf_qc_split_tx_complq_reset(txq->complq);
+ }
}
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ if (!cpfl_txq->hairpin_info.hairpin_q)
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1249,10 +1365,22 @@ cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
void
cpfl_stop_queues(struct rte_eth_dev *dev)
{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
struct cpfl_rx_queue *cpfl_rxq;
struct cpfl_tx_queue *cpfl_txq;
int i;
+ if (cpfl_vport->p2p_tx_complq != NULL) {
+ if (cpfl_switch_hairpin_complq(cpfl_vport, false) != 0)
+ PMD_DRV_LOG(ERR, "Failed to stop hairpin Tx complq");
+ }
+
+ if (cpfl_vport->p2p_rx_bufq != NULL) {
+ if (cpfl_switch_hairpin_bufq(cpfl_vport, false) != 0)
+ PMD_DRV_LOG(ERR, "Failed to stop hairpin Rx bufq");
+ }
+
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
if (cpfl_rxq == NULL)
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 872ebc1bfd..aacd087b56 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -41,6 +41,17 @@
#define CPFL_RX_BUF_STRIDE 64
+/* The value written in the RX buffer queue tail register,
+ * and in WritePTR field in the TX completion queue context,
+ * must be a multiple of 8.
+ */
+#define CPFL_HAIRPIN_Q_TAIL_AUX_VALUE 8
+
+struct virtchnl2_p2p_rx_buf_desc {
+ __le64 reserve0;
+ __le64 pkt_addr; /* Packet buffer address */
+};
+
struct cpfl_rxq_hairpin_info {
bool hairpin_q; /* if rx queue is a hairpin queue */
uint16_t peer_txp;
@@ -102,4 +113,8 @@ int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq);
int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
+int cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t qid,
+ bool rx, bool on);
#endif /* _CPFL_RXTX_H_ */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v5 10/13] common/idpf: add irq map config API
2023-05-31 10:18 ` [PATCH v5 00/13] net/cpfl: add hairpin queue support beilei.xing
` (8 preceding siblings ...)
2023-05-31 10:18 ` [PATCH v5 09/13] net/cpfl: support hairpin queue start/stop beilei.xing
@ 2023-05-31 10:18 ` beilei.xing
2023-05-31 10:18 ` [PATCH v5 11/13] net/cpfl: enable write back based on ITR expire beilei.xing
` (3 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:18 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds the idpf_vport_irq_map_config_by_qids API, which configures the IRQ mapping from caller-supplied absolute queue ids.
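For illustration only, a minimal hedged sketch of a PMD-side caller, assuming vport is in scope and the absolute queue ids have already been collected (the qids[] values below are placeholders, not real register ids):

    uint32_t qids[2] = { 34, 35 };  /* placeholder absolute queue ids */
    int ret;

    /* Map both Rx queues to the default vector; descriptors are then
     * written back on ITR expiration without enabling the interrupt.
     */
    ret = idpf_vport_irq_map_config_by_qids(vport, qids, 2);
    if (ret != 0)
        PMD_DRV_LOG(ERR, "irq map by queue ids failed");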
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_device.c | 75 ++++++++++++++++++++++++
drivers/common/idpf/idpf_common_device.h | 4 ++
drivers/common/idpf/version.map | 1 +
3 files changed, 80 insertions(+)
diff --git a/drivers/common/idpf/idpf_common_device.c b/drivers/common/idpf/idpf_common_device.c
index dc47551b17..cc4207a46e 100644
--- a/drivers/common/idpf/idpf_common_device.c
+++ b/drivers/common/idpf/idpf_common_device.c
@@ -667,6 +667,81 @@ idpf_vport_irq_map_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
return ret;
}
+int
+idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport, uint32_t *qids, uint16_t nb_rx_queues)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_queue_vector *qv_map;
+ struct idpf_hw *hw = &adapter->hw;
+ uint32_t dynctl_val, itrn_val;
+ uint32_t dynctl_reg_start;
+ uint32_t itrn_reg_start;
+ uint16_t i;
+ int ret;
+
+ qv_map = rte_zmalloc("qv_map",
+ nb_rx_queues *
+ sizeof(struct virtchnl2_queue_vector), 0);
+ if (qv_map == NULL) {
+ DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
+ nb_rx_queues);
+ ret = -ENOMEM;
+ goto qv_map_alloc_err;
+ }
+
+ /* Rx interrupt disabled, Map interrupt only for writeback */
+
+ /* The capability flags adapter->caps.other_caps should be
+ * compared with bit VIRTCHNL2_CAP_WB_ON_ITR here. The if
+ * condition should be updated when the FW can return the
+ * correct flag bits.
+ */
+ dynctl_reg_start =
+ vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
+ itrn_reg_start =
+ vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
+ dynctl_val = IDPF_READ_REG(hw, dynctl_reg_start);
+ DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x", dynctl_val);
+ itrn_val = IDPF_READ_REG(hw, itrn_reg_start);
+ DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
+ /* Force write-backs by setting WB_ON_ITR bit in DYN_CTL
+ * register. WB_ON_ITR and INTENA are mutually exclusive
+ * bits. Setting WB_ON_ITR bits means TX and RX Descs
+ * are written back based on ITR expiration irrespective
+ * of INTENA setting.
+ */
+ /* TBD: need to tune INTERVAL value for better performance. */
+ itrn_val = (itrn_val == 0) ? IDPF_DFLT_INTERVAL : itrn_val;
+ dynctl_val = VIRTCHNL2_ITR_IDX_0 <<
+ PF_GLINT_DYN_CTL_ITR_INDX_S |
+ PF_GLINT_DYN_CTL_WB_ON_ITR_M |
+ itrn_val << PF_GLINT_DYN_CTL_INTERVAL_S;
+ IDPF_WRITE_REG(hw, dynctl_reg_start, dynctl_val);
+
+ for (i = 0; i < nb_rx_queues; i++) {
+ /* map all queues to the same vector */
+ qv_map[i].queue_id = qids[i];
+ qv_map[i].vector_id =
+ vport->recv_vectors->vchunks.vchunks->start_vector_id;
+ }
+ vport->qv_map = qv_map;
+
+ ret = idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, true);
+ if (ret != 0) {
+ DRV_LOG(ERR, "config interrupt mapping failed");
+ goto config_irq_map_err;
+ }
+
+ return 0;
+
+config_irq_map_err:
+ rte_free(vport->qv_map);
+ vport->qv_map = NULL;
+
+qv_map_alloc_err:
+ return ret;
+}
+
int
idpf_vport_irq_unmap_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
{
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 112367dae8..f767ea7cec 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -200,5 +200,9 @@ int idpf_vport_info_init(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_info);
__rte_internal
void idpf_vport_stats_update(struct virtchnl2_vport_stats *oes, struct virtchnl2_vport_stats *nes);
+__rte_internal
+int idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport,
+ uint32_t *qids,
+ uint16_t nb_rx_queues);
#endif /* _IDPF_COMMON_DEVICE_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 25624732b0..0729f6b912 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -69,6 +69,7 @@ INTERNAL {
idpf_vport_info_init;
idpf_vport_init;
idpf_vport_irq_map_config;
+ idpf_vport_irq_map_config_by_qids;
idpf_vport_irq_unmap_config;
idpf_vport_rss_config;
idpf_vport_stats_update;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v5 11/13] net/cpfl: enable write back based on ITR expire
2023-05-31 10:18 ` [PATCH v5 00/13] net/cpfl: add hairpin queue support beilei.xing
` (9 preceding siblings ...)
2023-05-31 10:18 ` [PATCH v5 10/13] common/idpf: add irq map config API beilei.xing
@ 2023-05-31 10:18 ` beilei.xing
2023-05-31 10:18 ` [PATCH v5 12/13] net/cpfl: support peer ports get beilei.xing
` (2 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:18 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch enables write back based on ITR expire
(WB_ON_ITR) for hairpin queues.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 2b99e58341..850f1c0bc6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -735,11 +735,22 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
static int
cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
{
+ uint32_t qids[CPFL_MAX_P2P_NB_QUEUES + IDPF_DEFAULT_RXQ_NUM] = {0};
struct cpfl_vport *cpfl_vport = dev->data->dev_private;
struct idpf_vport *vport = &cpfl_vport->base;
uint16_t nb_rx_queues = dev->data->nb_rx_queues;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i;
- return idpf_vport_irq_map_config(vport, nb_rx_queues);
+ for (i = 0; i < nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ qids[i] = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid,
+ (i - cpfl_vport->nb_data_rxq));
+ else
+ qids[i] = cpfl_hw_qid_get(vport->chunks_info.rx_start_qid, i);
+ }
+ return idpf_vport_irq_map_config_by_qids(vport, qids, nb_rx_queues);
}
/* Update hairpin_info for dev's tx hairpin queue */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v5 12/13] net/cpfl: support peer ports get
2023-05-31 10:18 ` [PATCH v5 00/13] net/cpfl: add hairpin queue support beilei.xing
` (10 preceding siblings ...)
2023-05-31 10:18 ` [PATCH v5 11/13] net/cpfl: enable write back based on ITR expire beilei.xing
@ 2023-05-31 10:18 ` beilei.xing
2023-05-31 10:18 ` [PATCH v5 13/13] net/cpfl: support hairpin bind/unbind beilei.xing
2023-05-31 10:25 ` [PATCH v6 00/13] net/cpfl: add hairpin queue support beilei.xing
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:18 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports getting hairpin peer ports.
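As a hedged usage sketch from the application side (the array size is an arbitrary placeholder), the new ops is reached through the generic ethdev call:

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Sketch: query the Tx peer ports of an Rx hairpin port.
     * Direction 0 means the queried port is the Rx side.
     */
    static int
    show_hairpin_peers(uint16_t rx_port)
    {
        uint16_t peers[16];  /* placeholder array size */
        int nb;

        nb = rte_eth_hairpin_get_peer_ports(rx_port, peers, RTE_DIM(peers), 0);
        if (nb < 0)
            printf("failed to get hairpin peer ports: %d\n", nb);
        return nb;
    }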
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 40 ++++++++++++++++++++++++++++++++++
1 file changed, 40 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 850f1c0bc6..9fc7d3401f 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1080,6 +1080,45 @@ cpfl_dev_close(struct rte_eth_dev *dev)
return 0;
}
+static int
+cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
+ size_t len, uint32_t tx)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_tx_queue *txq;
+ struct idpf_rx_queue *rxq;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i, j;
+
+ if (len <= 0)
+ return -EINVAL;
+
+ if (cpfl_vport->p2p_q_chunks_info == NULL)
+ return -ENOTSUP;
+
+ if (tx > 0) {
+ for (i = cpfl_vport->nb_data_txq, j = 0; i < dev->data->nb_tx_queues; i++, j++) {
+ txq = dev->data->tx_queues[i];
+ if (txq == NULL)
+ return -EINVAL;
+ cpfl_txq = (struct cpfl_tx_queue *)txq;
+ peer_ports[j] = cpfl_txq->hairpin_info.peer_rxp;
+ }
+ } else if (tx == 0) {
+ for (i = cpfl_vport->nb_data_rxq, j = 0; i < dev->data->nb_rx_queues; i++, j++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq == NULL)
+ return -EINVAL;
+ cpfl_rxq = (struct cpfl_rx_queue *)rxq;
+ peer_ports[j] = cpfl_rxq->hairpin_info.peer_txp;
+ }
+ }
+
+ return j;
+}
+
static const struct eth_dev_ops cpfl_eth_dev_ops = {
.dev_configure = cpfl_dev_configure,
.dev_close = cpfl_dev_close,
@@ -1109,6 +1148,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.hairpin_cap_get = cpfl_hairpin_cap_get,
.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
+ .hairpin_get_peer_ports = cpfl_hairpin_get_peer_ports,
};
static int
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v5 13/13] net/cpfl: support hairpin bind/unbind
2023-05-31 10:18 ` [PATCH v5 00/13] net/cpfl: add hairpin queue support beilei.xing
` (11 preceding siblings ...)
2023-05-31 10:18 ` [PATCH v5 12/13] net/cpfl: support peer ports get beilei.xing
@ 2023-05-31 10:18 ` beilei.xing
2023-05-31 10:25 ` [PATCH v6 00/13] net/cpfl: add hairpin queue support beilei.xing
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:18 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports the hairpin_bind/unbind ops.
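A hedged application-level sketch of the flow these ops enable for two-port hairpin with manual binding (tx_port and rx_port are placeholder port ids):

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Sketch: manually bind/unbind a Tx port to an Rx port that were
     * both started with manual-bind hairpin queues.
     */
    static int
    hairpin_attach_detach(uint16_t tx_port, uint16_t rx_port)
    {
        int ret = rte_eth_hairpin_bind(tx_port, rx_port);

        if (ret != 0) {
            printf("hairpin bind %u -> %u failed: %d\n", tx_port, rx_port, ret);
            return ret;
        }
        /* ... hairpin traffic flows between the two ports ... */
        return rte_eth_hairpin_unbind(tx_port, rx_port);
    }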
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 137 +++++++++++++++++++++++++++++++++
1 file changed, 137 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 9fc7d3401f..ff36f02b11 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1119,6 +1119,141 @@ cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
return j;
}
+static int
+cpfl_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct idpf_vport *tx_vport = &cpfl_tx_vport->base;
+ struct cpfl_vport *cpfl_rx_vport;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct rte_eth_dev *peer_dev;
+ struct idpf_vport *rx_vport;
+ int err = 0;
+ int i;
+
+ err = cpfl_txq_hairpin_info_update(dev, rx_port);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info.");
+ return err;
+ }
+
+ /* configure hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ err = cpfl_hairpin_txq_config(tx_vport, cpfl_txq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+ return err;
+ }
+ }
+
+ err = cpfl_hairpin_tx_complq_config(cpfl_tx_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
+ return err;
+ }
+
+ peer_dev = &rte_eth_devices[rx_port];
+ cpfl_rx_vport = (struct cpfl_vport *)peer_dev->data->dev_private;
+ rx_vport = &cpfl_rx_vport->base;
+ cpfl_rxq_hairpin_mz_bind(peer_dev);
+
+ err = cpfl_hairpin_rx_bufq_config(cpfl_rx_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
+ return err;
+ }
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ err = cpfl_hairpin_rxq_config(rx_vport, cpfl_rxq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
+ return err;
+ }
+ err = cpfl_rx_queue_init(peer_dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+ return err;
+ }
+ }
+
+ /* enable hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+ i - cpfl_tx_vport->nb_data_txq,
+ false, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on",
+ i);
+ return err;
+ }
+ cpfl_txq->base.q_started = true;
+ }
+
+ err = cpfl_switch_hairpin_complq(cpfl_tx_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq");
+ return err;
+ }
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+ i - cpfl_rx_vport->nb_data_rxq,
+ true, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on",
+ i);
+ }
+ cpfl_rxq->base.q_started = true;
+ }
+
+ err = cpfl_switch_hairpin_bufq(cpfl_rx_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Rx buffer queue");
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+cpfl_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+ struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i;
+
+ /* disable hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+ i - cpfl_tx_vport->nb_data_txq,
+ false, false);
+ cpfl_txq->base.q_started = false;
+ }
+
+ cpfl_switch_hairpin_complq(cpfl_tx_vport, false);
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+ i - cpfl_rx_vport->nb_data_rxq,
+ true, false);
+ cpfl_rxq->base.q_started = false;
+ }
+
+ cpfl_switch_hairpin_bufq(cpfl_rx_vport, false);
+
+ return 0;
+}
+
static const struct eth_dev_ops cpfl_eth_dev_ops = {
.dev_configure = cpfl_dev_configure,
.dev_close = cpfl_dev_close,
@@ -1149,6 +1284,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
.hairpin_get_peer_ports = cpfl_hairpin_get_peer_ports,
+ .hairpin_bind = cpfl_hairpin_bind,
+ .hairpin_unbind = cpfl_hairpin_unbind,
};
static int
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v6 00/13] net/cpfl: add hairpin queue support
2023-05-31 10:18 ` [PATCH v5 00/13] net/cpfl: add hairpin queue support beilei.xing
` (12 preceding siblings ...)
2023-05-31 10:18 ` [PATCH v5 13/13] net/cpfl: support hairpin bind/unbind beilei.xing
@ 2023-05-31 10:25 ` beilei.xing
2023-05-31 10:25 ` [PATCH v6 01/13] net/cpfl: refine structures beilei.xing
` (13 more replies)
13 siblings, 14 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:25 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patchset adds hairpin queue support.
v2 changes:
- change hairpin rx queues configuration sequence.
- code refine.
v3 changes:
- Refine the patchset based on the latest code.
v4 change:
- Remove hairpin rx buffer queue's sw_ring.
- Change hairpin rx queues configuration sequence in cpfl_hairpin_bind function.
- Refine hairpin queue setup and release.
v5 change:
- Fix memory leak during queue setup.
- Refine hairpin Rxq/Txq start/stop.
v6 change:
- Add sign-off.
Beilei Xing (13):
net/cpfl: refine structures
common/idpf: support queue groups add/delete
net/cpfl: add haipin queue group during vport init
net/cpfl: support hairpin queue capbility get
net/cpfl: support hairpin queue setup and release
common/idpf: add queue config API
net/cpfl: support hairpin queue configuration
common/idpf: add switch queue API
net/cpfl: support hairpin queue start/stop
common/idpf: add irq map config API
net/cpfl: enable write back based on ITR expire
net/cpfl: support peer ports get
net/cpfl: support hairpin bind/unbind
drivers/common/idpf/idpf_common_device.c | 75 ++
drivers/common/idpf/idpf_common_device.h | 4 +
drivers/common/idpf/idpf_common_virtchnl.c | 138 +++-
drivers/common/idpf/idpf_common_virtchnl.h | 18 +
drivers/common/idpf/version.map | 6 +
drivers/net/cpfl/cpfl_ethdev.c | 610 ++++++++++++++--
drivers/net/cpfl/cpfl_ethdev.h | 35 +-
drivers/net/cpfl/cpfl_rxtx.c | 781 +++++++++++++++++++--
drivers/net/cpfl/cpfl_rxtx.h | 76 ++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 21 +-
10 files changed, 1645 insertions(+), 119 deletions(-)
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v6 01/13] net/cpfl: refine structures
2023-05-31 10:25 ` [PATCH v6 00/13] net/cpfl: add hairpin queue support beilei.xing
@ 2023-05-31 10:25 ` beilei.xing
2023-05-31 10:25 ` [PATCH v6 02/13] common/idpf: support queue groups add/delete beilei.xing
` (12 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:25 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch refines some structures to support hairpin queues:
cpfl_rx_queue/cpfl_tx_queue/cpfl_vport.
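In a minimal sketch of the pattern (queue_id is a placeholder index), the new cpfl types embed the common idpf structures as their first member, so dev->data can hold the extended type while the common module keeps operating on the embedded base:

    struct cpfl_rx_queue *cpfl_rxq = dev->data->rx_queues[queue_id];

    /* common idpf helpers still work on the embedded base queue */
    idpf_qc_split_rx_queue_reset(&cpfl_rxq->base);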
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 85 +++++++-----
drivers/net/cpfl/cpfl_ethdev.h | 6 +-
drivers/net/cpfl/cpfl_rxtx.c | 175 +++++++++++++++++-------
drivers/net/cpfl/cpfl_rxtx.h | 8 ++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 17 +--
5 files changed, 196 insertions(+), 95 deletions(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 7528a14d05..e587155db6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -124,7 +124,8 @@ static int
cpfl_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct rte_eth_link new_link;
unsigned int i;
@@ -156,7 +157,8 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
static int
cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
dev_info->max_rx_queues = base->caps.max_rx_q;
@@ -216,7 +218,8 @@ cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
static int
cpfl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
/* mtu setting is forbidden if port is start */
if (dev->data->dev_started) {
@@ -256,12 +259,12 @@ static uint64_t
cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
{
uint64_t mbuf_alloc_failed = 0;
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i = 0;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- mbuf_alloc_failed += __atomic_load_n(&rxq->rx_stats.mbuf_alloc_failed,
+ cpfl_rxq = dev->data->rx_queues[i];
+ mbuf_alloc_failed += __atomic_load_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,
__ATOMIC_RELAXED);
}
@@ -271,8 +274,8 @@ cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
static int
cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
int ret;
@@ -305,20 +308,20 @@ cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
static void
cpfl_reset_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- __atomic_store_n(&rxq->rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
+ cpfl_rxq = dev->data->rx_queues[i];
+ __atomic_store_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
}
}
static int
cpfl_dev_stats_reset(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
int ret;
@@ -343,8 +346,8 @@ static int cpfl_dev_xstats_reset(struct rte_eth_dev *dev)
static int cpfl_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned int n)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
unsigned int i;
int ret;
@@ -459,7 +462,8 @@ cpfl_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t idx, shift;
int ret = 0;
@@ -498,7 +502,8 @@ cpfl_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t idx, shift;
int ret = 0;
@@ -536,7 +541,8 @@ static int
cpfl_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
int ret = 0;
@@ -601,7 +607,8 @@ static int
cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
int ret = 0;
@@ -638,7 +645,8 @@ cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,
static int
cpfl_dev_configure(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct rte_eth_conf *conf = &dev->data->dev_conf;
struct idpf_adapter *base = vport->adapter;
int ret;
@@ -710,7 +718,8 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
static int
cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
uint16_t nb_rx_queues = dev->data->nb_rx_queues;
return idpf_vport_irq_map_config(vport, nb_rx_queues);
@@ -719,14 +728,14 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
static int
cpfl_start_queues(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
- struct idpf_tx_queue *txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
int err = 0;
int i;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL || txq->tx_deferred_start)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
continue;
err = cpfl_tx_queue_start(dev, i);
if (err != 0) {
@@ -736,8 +745,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- if (rxq == NULL || rxq->rx_deferred_start)
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
continue;
err = cpfl_rx_queue_start(dev, i);
if (err != 0) {
@@ -752,7 +761,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)
static int
cpfl_dev_start(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(base);
uint16_t num_allocated_vectors = base->caps.num_allocated_vectors;
@@ -813,7 +823,8 @@ cpfl_dev_start(struct rte_eth_dev *dev)
static int
cpfl_dev_stop(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
if (dev->data->dev_started == 0)
return 0;
@@ -832,7 +843,8 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
static int
cpfl_dev_close(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
@@ -842,7 +854,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
adapter->cur_vport_nb--;
dev->data->dev_private = NULL;
adapter->vports[vport->sw_idx] = NULL;
- rte_free(vport);
+ rte_free(cpfl_vport);
return 0;
}
@@ -1047,7 +1059,7 @@ cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id)
int i;
for (i = 0; i < adapter->cur_vport_nb; i++) {
- vport = adapter->vports[i];
+ vport = &adapter->vports[i]->base;
if (vport->vport_id != vport_id)
continue;
else
@@ -1275,7 +1287,8 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter)
static int
cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_vport_param *param = init_params;
struct cpfl_adapter_ext *adapter = param->adapter;
/* for sending create vport virtchnl msg prepare */
@@ -1300,7 +1313,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
goto err;
}
- adapter->vports[param->idx] = vport;
+ adapter->vports[param->idx] = cpfl_vport;
adapter->cur_vports |= RTE_BIT32(param->devarg_id);
adapter->cur_vport_nb++;
@@ -1415,7 +1428,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
snprintf(name, sizeof(name), "cpfl_%s_vport_0",
pci_dev->device.name);
retval = rte_eth_dev_create(&pci_dev->device, name,
- sizeof(struct idpf_vport),
+ sizeof(struct cpfl_vport),
NULL, NULL, cpfl_dev_vport_init,
&vport_param);
if (retval != 0)
@@ -1433,7 +1446,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
pci_dev->device.name,
devargs.req_vports[i]);
retval = rte_eth_dev_create(&pci_dev->device, name,
- sizeof(struct idpf_vport),
+ sizeof(struct cpfl_vport),
NULL, NULL, cpfl_dev_vport_init,
&vport_param);
if (retval != 0)
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 200dfcac02..81fe9ac4c3 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -69,13 +69,17 @@ struct cpfl_devargs {
uint16_t req_vport_nb;
};
+struct cpfl_vport {
+ struct idpf_vport base;
+};
+
struct cpfl_adapter_ext {
TAILQ_ENTRY(cpfl_adapter_ext) next;
struct idpf_adapter base;
char name[CPFL_ADAPTER_NAME_LEN];
- struct idpf_vport **vports;
+ struct cpfl_vport **vports;
uint16_t max_vport_nb;
uint16_t cur_vports; /* bit mask of created vport */
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 75021c3c54..04a51b8d15 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -128,7 +128,8 @@ cpfl_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,
uint16_t nb_desc, unsigned int socket_id,
struct rte_mempool *mp, uint8_t bufq_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct idpf_hw *hw = &base->hw;
const struct rte_memzone *mz;
@@ -220,15 +221,69 @@ cpfl_rx_split_bufq_release(struct idpf_rx_queue *bufq)
rte_free(bufq);
}
+static void
+cpfl_rx_queue_release(void *rxq)
+{
+ struct cpfl_rx_queue *cpfl_rxq = rxq;
+ struct idpf_rx_queue *q = NULL;
+
+ if (cpfl_rxq == NULL)
+ return;
+
+ q = &cpfl_rxq->base;
+
+ /* Split queue */
+ if (!q->adapter->is_rx_singleq) {
+ if (q->bufq2)
+ cpfl_rx_split_bufq_release(q->bufq2);
+
+ if (q->bufq1)
+ cpfl_rx_split_bufq_release(q->bufq1);
+
+ rte_free(cpfl_rxq);
+ return;
+ }
+
+ /* Single queue */
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(cpfl_rxq);
+}
+
+static void
+cpfl_tx_queue_release(void *txq)
+{
+ struct cpfl_tx_queue *cpfl_txq = txq;
+ struct idpf_tx_queue *q = NULL;
+
+ if (cpfl_txq == NULL)
+ return;
+
+ q = &cpfl_txq->base;
+
+ if (q->complq) {
+ rte_memzone_free(q->complq->mz);
+ rte_free(q->complq);
+ }
+
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(cpfl_txq);
+}
+
int
cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct idpf_hw *hw = &base->hw;
+ struct cpfl_rx_queue *cpfl_rxq;
const struct rte_memzone *mz;
struct idpf_rx_queue *rxq;
uint16_t rx_free_thresh;
@@ -248,21 +303,23 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Free memory if needed */
if (dev->data->rx_queues[queue_idx] != NULL) {
- idpf_qc_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
dev->data->rx_queues[queue_idx] = NULL;
}
/* Setup Rx queue */
- rxq = rte_zmalloc_socket("cpfl rxq",
- sizeof(struct idpf_rx_queue),
+ cpfl_rxq = rte_zmalloc_socket("cpfl rxq",
+ sizeof(struct cpfl_rx_queue),
RTE_CACHE_LINE_SIZE,
socket_id);
- if (rxq == NULL) {
+ if (cpfl_rxq == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
ret = -ENOMEM;
goto err_rxq_alloc;
}
+ rxq = &cpfl_rxq->base;
+
is_splitq = !!(vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
rxq->mp = mp;
@@ -329,7 +386,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
rxq->q_set = true;
- dev->data->rx_queues[queue_idx] = rxq;
+ dev->data->rx_queues[queue_idx] = cpfl_rxq;
return 0;
@@ -349,7 +406,8 @@ cpfl_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq,
uint16_t queue_idx, uint16_t nb_desc,
unsigned int socket_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
const struct rte_memzone *mz;
struct idpf_tx_queue *cq;
int ret;
@@ -397,9 +455,11 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t tx_rs_thresh, tx_free_thresh;
+ struct cpfl_tx_queue *cpfl_txq;
struct idpf_hw *hw = &base->hw;
const struct rte_memzone *mz;
struct idpf_tx_queue *txq;
@@ -419,21 +479,23 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Free memory if needed. */
if (dev->data->tx_queues[queue_idx] != NULL) {
- idpf_qc_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
dev->data->tx_queues[queue_idx] = NULL;
}
/* Allocate the TX queue data structure. */
- txq = rte_zmalloc_socket("cpfl txq",
- sizeof(struct idpf_tx_queue),
+ cpfl_txq = rte_zmalloc_socket("cpfl txq",
+ sizeof(struct cpfl_tx_queue),
RTE_CACHE_LINE_SIZE,
socket_id);
- if (txq == NULL) {
+ if (cpfl_txq == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
ret = -ENOMEM;
goto err_txq_alloc;
}
+ txq = &cpfl_txq->base;
+
is_splitq = !!(vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
txq->nb_tx_desc = nb_desc;
@@ -487,7 +549,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
queue_idx * vport->chunks_info.tx_qtail_spacing);
txq->ops = &def_txq_ops;
txq->q_set = true;
- dev->data->tx_queues[queue_idx] = txq;
+ dev->data->tx_queues[queue_idx] = cpfl_txq;
return 0;
@@ -503,6 +565,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
+ struct cpfl_rx_queue *cpfl_rxq;
struct idpf_rx_queue *rxq;
uint16_t max_pkt_len;
uint32_t frame_size;
@@ -511,7 +574,8 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
if (rx_queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
- rxq = dev->data->rx_queues[rx_queue_id];
+ cpfl_rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = &cpfl_rxq->base;
if (rxq == NULL || !rxq->q_set) {
PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
@@ -575,9 +639,10 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_rx_queue *rxq =
- dev->data->rx_queues[rx_queue_id];
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq = dev->data->rx_queues[rx_queue_id];
+ struct idpf_rx_queue *rxq = &cpfl_rxq->base;
int err = 0;
err = idpf_vc_rxq_config(vport, rxq);
@@ -610,15 +675,15 @@ cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
if (tx_queue_id >= dev->data->nb_tx_queues)
return -EINVAL;
- txq = dev->data->tx_queues[tx_queue_id];
+ cpfl_txq = dev->data->tx_queues[tx_queue_id];
/* Init the RX tail register. */
- IDPF_PCI_REG_WRITE(txq->qtx_tail, 0);
+ IDPF_PCI_REG_WRITE(cpfl_txq->base.qtx_tail, 0);
return 0;
}
@@ -626,12 +691,13 @@ cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
int
cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_tx_queue *txq =
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_tx_queue *cpfl_txq =
dev->data->tx_queues[tx_queue_id];
int err = 0;
- err = idpf_vc_txq_config(vport, txq);
+ err = idpf_vc_txq_config(vport, &cpfl_txq->base);
if (err != 0) {
PMD_DRV_LOG(ERR, "Fail to configure Tx queue %u", tx_queue_id);
return err;
@@ -650,7 +716,7 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
tx_queue_id);
} else {
- txq->q_started = true;
+ cpfl_txq->base.q_started = true;
dev->data->tx_queue_state[tx_queue_id] =
RTE_ETH_QUEUE_STATE_STARTED;
}
@@ -661,13 +727,16 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
int
cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq;
struct idpf_rx_queue *rxq;
int err;
if (rx_queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
+ cpfl_rxq = dev->data->rx_queues[rx_queue_id];
err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
@@ -675,7 +744,7 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return err;
}
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = &cpfl_rxq->base;
rxq->q_started = false;
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
rxq->ops->release_mbufs(rxq);
@@ -693,13 +762,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_tx_queue *cpfl_txq;
struct idpf_tx_queue *txq;
int err;
if (tx_queue_id >= dev->data->nb_tx_queues)
return -EINVAL;
+ cpfl_txq = dev->data->tx_queues[tx_queue_id];
+
err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
@@ -707,7 +780,7 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
return err;
}
- txq = dev->data->tx_queues[tx_queue_id];
+ txq = &cpfl_txq->base;
txq->q_started = false;
txq->ops->release_mbufs(txq);
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
@@ -724,25 +797,25 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
void
cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- idpf_qc_rx_queue_release(dev->data->rx_queues[qid]);
+ cpfl_rx_queue_release(dev->data->rx_queues[qid]);
}
void
cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- idpf_qc_tx_queue_release(dev->data->tx_queues[qid]);
+ cpfl_tx_queue_release(dev->data->tx_queues[qid]);
}
void
cpfl_stop_queues(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
- struct idpf_tx_queue *txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
int i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- if (rxq == NULL)
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq == NULL)
continue;
if (cpfl_rx_queue_stop(dev, i) != 0)
@@ -750,8 +823,8 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL)
continue;
if (cpfl_tx_queue_stop(dev, i) != 0)
@@ -762,9 +835,10 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
void
cpfl_set_rx_function(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
#ifdef RTE_ARCH_X86
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i;
if (cpfl_rx_vec_dev_check_default(dev) == CPFL_VECTOR_PATH &&
@@ -790,8 +864,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- (void)idpf_qc_splitq_rx_vec_setup(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ (void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
if (vport->rx_use_avx512) {
@@ -810,8 +884,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
} else {
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- (void)idpf_qc_singleq_rx_vec_setup(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ (void)idpf_qc_singleq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
if (vport->rx_use_avx512) {
@@ -860,10 +934,11 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
void
cpfl_set_tx_function(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
#ifdef RTE_ARCH_X86
#ifdef CC_AVX512_SUPPORT
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
int i;
#endif /* CC_AVX512_SUPPORT */
@@ -878,8 +953,8 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
vport->tx_use_avx512 = true;
if (vport->tx_use_avx512) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- idpf_qc_tx_vec_avx512_setup(txq);
+ cpfl_txq = dev->data->tx_queues[i];
+ idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
}
}
}
@@ -916,10 +991,10 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
#ifdef CC_AVX512_SUPPORT
if (vport->tx_use_avx512) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL)
continue;
- idpf_qc_tx_vec_avx512_setup(txq);
+ idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
}
PMD_DRV_LOG(NOTICE,
"Using Single AVX512 Vector Tx (port %d).",
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index fb267d38c8..bfb9ad97bd 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -23,6 +23,14 @@
#define CPFL_SUPPORT_CHAIN_NUM 5
+struct cpfl_rx_queue {
+ struct idpf_rx_queue base;
+};
+
+struct cpfl_tx_queue {
+ struct idpf_tx_queue base;
+};
+
int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
index 665418d27d..5690b17911 100644
--- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
+++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
@@ -76,15 +76,16 @@ cpfl_rx_splitq_vec_default(struct idpf_rx_queue *rxq)
static inline int
cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_rx_queue *rxq;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq;
int i, default_ret, splitq_ret, ret = CPFL_SCALAR_PATH;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- default_ret = cpfl_rx_vec_queue_default(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
- splitq_ret = cpfl_rx_splitq_vec_default(rxq);
+ splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base);
ret = splitq_ret && default_ret;
} else {
ret = default_ret;
@@ -100,12 +101,12 @@ static inline int
cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)
{
int i;
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
int ret = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- ret = cpfl_tx_vec_queue_default(txq);
+ cpfl_txq = dev->data->tx_queues[i];
+ ret = cpfl_tx_vec_queue_default(&cpfl_txq->base);
if (ret == CPFL_SCALAR_PATH)
return CPFL_SCALAR_PATH;
}
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v6 02/13] common/idpf: support queue groups add/delete
2023-05-31 10:25 ` [PATCH v6 00/13] net/cpfl: add hairpin queue support beilei.xing
2023-05-31 10:25 ` [PATCH v6 01/13] net/cpfl: refine structures beilei.xing
@ 2023-05-31 10:25 ` beilei.xing
2023-05-31 10:25 ` [PATCH v6 03/13] net/cpfl: add haipin queue group during vport init beilei.xing
` (11 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:25 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds queue group add/delete virtual channel support.
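For the delete path, a minimal hedged sketch of a driver-side caller (the group id value and the VIRTCHNL2_QUEUE_GROUP_P2P type mirror the cpfl usage later in this series and are not part of this patch):

    struct virtchnl2_queue_group_id qg_ids[1] = {0};

    qg_ids[0].queue_group_id = 1;  /* P2P queue group id used by cpfl */
    qg_ids[0].queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
    if (idpf_vc_queue_grps_del(vport, 1, qg_ids) != 0)
        PMD_DRV_LOG(ERR, "Failed to delete queue groups");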
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 66 ++++++++++++++++++++++
drivers/common/idpf/idpf_common_virtchnl.h | 9 +++
drivers/common/idpf/version.map | 2 +
3 files changed, 77 insertions(+)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index b713678634..a3fe55c897 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -359,6 +359,72 @@ idpf_vc_vport_destroy(struct idpf_vport *vport)
return err;
}
+int
+idpf_vc_queue_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
+ uint8_t *p2p_queue_grps_out)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_cmd_info args;
+ int size, qg_info_size;
+ int err = -1;
+
+ size = sizeof(*p2p_queue_grps_info) +
+ (p2p_queue_grps_info->qg_info.num_queue_groups - 1) *
+ sizeof(struct virtchnl2_queue_group_info);
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_ADD_QUEUE_GROUPS;
+ args.in_args = (uint8_t *)p2p_queue_grps_info;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ if (err != 0) {
+ DRV_LOG(ERR,
+ "Failed to execute command of VIRTCHNL2_OP_ADD_QUEUE_GROUPS");
+ return err;
+ }
+
+ rte_memcpy(p2p_queue_grps_out, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
+ return 0;
+}
+
+int idpf_vc_queue_grps_del(struct idpf_vport *vport,
+ uint16_t num_q_grps,
+ struct virtchnl2_queue_group_id *qg_ids)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_delete_queue_groups *vc_del_q_grps;
+ struct idpf_cmd_info args;
+ int size;
+ int err;
+
+ size = sizeof(*vc_del_q_grps) +
+ (num_q_grps - 1) * sizeof(struct virtchnl2_queue_group_id);
+ vc_del_q_grps = rte_zmalloc("vc_del_q_grps", size, 0);
+
+ vc_del_q_grps->vport_id = vport->vport_id;
+ vc_del_q_grps->num_queue_groups = num_q_grps;
+ memcpy(vc_del_q_grps->qg_ids, qg_ids,
+ num_q_grps * sizeof(struct virtchnl2_queue_group_id));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_DEL_QUEUE_GROUPS;
+ args.in_args = (uint8_t *)vc_del_q_grps;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_DEL_QUEUE_GROUPS");
+
+ rte_free(vc_del_q_grps);
+ return err;
+}
+
int
idpf_vc_rss_key_set(struct idpf_vport *vport)
{
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index c45295290e..58b16e1c5d 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -64,4 +64,13 @@ int idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
__rte_internal
int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 *buff_count, struct idpf_dma_mem **buffs);
+__rte_internal
+int idpf_vc_queue_grps_del(struct idpf_vport *vport,
+ uint16_t num_q_grps,
+ struct virtchnl2_queue_group_id *qg_ids);
+__rte_internal
+int
+idpf_vc_queue_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *ptp_queue_grps_info,
+ uint8_t *ptp_queue_grps_out);
#endif /* _IDPF_COMMON_VIRTCHNL_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 70334a1b03..01d18f3f3f 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -43,6 +43,8 @@ INTERNAL {
idpf_vc_irq_map_unmap_config;
idpf_vc_one_msg_read;
idpf_vc_ptype_info_query;
+ idpf_vc_queue_grps_add;
+ idpf_vc_queue_grps_del;
idpf_vc_queue_switch;
idpf_vc_queues_ena_dis;
idpf_vc_rss_hash_get;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v6 03/13] net/cpfl: add haipin queue group during vport init
2023-05-31 10:25 ` [PATCH v6 00/13] net/cpfl: add hairpin queue support beilei.xing
2023-05-31 10:25 ` [PATCH v6 01/13] net/cpfl: refine structures beilei.xing
2023-05-31 10:25 ` [PATCH v6 02/13] common/idpf: support queue groups add/delete beilei.xing
@ 2023-05-31 10:25 ` beilei.xing
2023-05-31 10:25 ` [PATCH v6 04/13] net/cpfl: support hairpin queue capbility get beilei.xing
` (10 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:25 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds a hairpin queue group during vport init.
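A hedged illustration of how the stored chunk info is consumed once the group exists (cpfl_hw_qid_get() is a small helper added later in this series; i stands for a hairpin queue index):

    /* absolute hardware queue id of the i-th P2P Rx queue, derived from
     * the start id returned in the P2P queue group chunks
     */
    uint32_t qid = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid, i);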
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 133 +++++++++++++++++++++++++++++++++
drivers/net/cpfl/cpfl_ethdev.h | 18 +++++
drivers/net/cpfl/cpfl_rxtx.h | 7 ++
3 files changed, 158 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index e587155db6..c1273a7478 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -840,6 +840,20 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
return 0;
}
+static int
+cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
+{
+ struct virtchnl2_queue_group_id qg_ids[CPFL_P2P_NB_QUEUE_GRPS] = {0};
+ int ret = 0;
+
+ qg_ids[0].queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
+ qg_ids[0].queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
+ ret = idpf_vc_queue_grps_del(vport, CPFL_P2P_NB_QUEUE_GRPS, qg_ids);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to delete p2p queue groups");
+ return ret;
+}
+
static int
cpfl_dev_close(struct rte_eth_dev *dev)
{
@@ -848,7 +862,12 @@ cpfl_dev_close(struct rte_eth_dev *dev)
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
+
+ if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
+ cpfl_p2p_queue_grps_del(vport);
+
idpf_vport_deinit(vport);
+ rte_free(cpfl_vport->p2p_q_chunks_info);
adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
adapter->cur_vport_nb--;
@@ -1284,6 +1303,96 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter)
return vport_idx;
}
+static int
+cpfl_p2p_q_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
+ uint8_t *p2p_q_vc_out_info)
+{
+ int ret;
+
+ p2p_queue_grps_info->vport_id = vport->vport_id;
+ p2p_queue_grps_info->qg_info.num_queue_groups = CPFL_P2P_NB_QUEUE_GRPS;
+ p2p_queue_grps_info->qg_info.groups[0].num_rx_q = CPFL_MAX_P2P_NB_QUEUES;
+ p2p_queue_grps_info->qg_info.groups[0].num_rx_bufq = CPFL_P2P_NB_RX_BUFQ;
+ p2p_queue_grps_info->qg_info.groups[0].num_tx_q = CPFL_MAX_P2P_NB_QUEUES;
+ p2p_queue_grps_info->qg_info.groups[0].num_tx_complq = CPFL_P2P_NB_TX_COMPLQ;
+ p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
+ p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
+ p2p_queue_grps_info->qg_info.groups[0].rx_q_grp_info.rss_lut_size = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.tx_tc = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.priority = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.is_sp = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.pir_weight = 0;
+
+ ret = idpf_vc_queue_grps_add(vport, p2p_queue_grps_info, p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to add p2p queue groups.");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
+ struct virtchnl2_add_queue_groups *p2p_q_vc_out_info)
+{
+ struct p2p_queue_chunks_info *p2p_q_chunks_info = cpfl_vport->p2p_q_chunks_info;
+ struct virtchnl2_queue_reg_chunks *vc_chunks_out;
+ int i, type;
+
+ if (p2p_q_vc_out_info->qg_info.groups[0].qg_id.queue_group_type !=
+ VIRTCHNL2_QUEUE_GROUP_P2P) {
+ PMD_DRV_LOG(ERR, "Add queue group response mismatch.");
+ return -EINVAL;
+ }
+
+ vc_chunks_out = &p2p_q_vc_out_info->qg_info.groups[0].chunks;
+
+ for (i = 0; i < vc_chunks_out->num_chunks; i++) {
+ type = vc_chunks_out->chunks[i].type;
+ switch (type) {
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ p2p_q_chunks_info->tx_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->tx_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->tx_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ p2p_q_chunks_info->rx_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->rx_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->rx_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ p2p_q_chunks_info->tx_compl_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->tx_compl_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->tx_compl_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ p2p_q_chunks_info->rx_buf_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->rx_buf_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->rx_buf_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported queue type");
+ break;
+ }
+ }
+
+ return 0;
+}
+
static int
cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
{
@@ -1293,6 +1402,8 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
struct cpfl_adapter_ext *adapter = param->adapter;
/* for sending create vport virtchnl msg prepare */
struct virtchnl2_create_vport create_vport_info;
+ struct virtchnl2_add_queue_groups p2p_queue_grps_info;
+ uint8_t p2p_q_vc_out_info[IDPF_DFLT_MBX_BUF_SIZE] = {0};
int ret = 0;
dev->dev_ops = &cpfl_eth_dev_ops;
@@ -1327,6 +1438,28 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
&dev->data->mac_addrs[0]);
+ if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
+ memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
+ ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to add p2p queue group.");
+ return 0;
+ }
+ cpfl_vport->p2p_q_chunks_info = rte_zmalloc(NULL,
+ sizeof(struct p2p_queue_chunks_info), 0);
+ if (cpfl_vport->p2p_q_chunks_info == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate p2p queue info.");
+ cpfl_p2p_queue_grps_del(vport);
+ return 0;
+ }
+ ret = cpfl_p2p_queue_info_init(cpfl_vport,
+ (struct virtchnl2_add_queue_groups *)p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to init p2p queue info.");
+ cpfl_p2p_queue_grps_del(vport);
+ }
+ }
+
return 0;
err_mac_addrs:
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 81fe9ac4c3..666d46a44a 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -56,6 +56,7 @@
/* Device IDs */
#define IDPF_DEV_ID_CPF 0x1453
+#define VIRTCHNL2_QUEUE_GROUP_P2P 0x100
struct cpfl_vport_param {
struct cpfl_adapter_ext *adapter;
@@ -69,8 +70,25 @@ struct cpfl_devargs {
uint16_t req_vport_nb;
};
+struct p2p_queue_chunks_info {
+ uint32_t tx_start_qid;
+ uint32_t rx_start_qid;
+ uint32_t tx_compl_start_qid;
+ uint32_t rx_buf_start_qid;
+
+ uint64_t tx_qtail_start;
+ uint32_t tx_qtail_spacing;
+ uint64_t rx_qtail_start;
+ uint32_t rx_qtail_spacing;
+ uint64_t tx_compl_qtail_start;
+ uint32_t tx_compl_qtail_spacing;
+ uint64_t rx_buf_qtail_start;
+ uint32_t rx_buf_qtail_spacing;
+};
+
struct cpfl_vport {
struct idpf_vport base;
+ struct p2p_queue_chunks_info *p2p_q_chunks_info;
};
struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index bfb9ad97bd..1fe65778f0 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -13,6 +13,13 @@
#define CPFL_MIN_RING_DESC 32
#define CPFL_MAX_RING_DESC 4096
#define CPFL_DMA_MEM_ALIGN 4096
+
+#define CPFL_MAX_P2P_NB_QUEUES 16
+#define CPFL_P2P_NB_RX_BUFQ 1
+#define CPFL_P2P_NB_TX_COMPLQ 1
+#define CPFL_P2P_NB_QUEUE_GRPS 1
+#define CPFL_P2P_QUEUE_GRP_ID 1
+
/* Base address of the HW descriptor ring should be 128B aligned. */
#define CPFL_RING_BASE_ALIGN 128
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
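[Editor's note] The chunk info recorded above is consumed later in the series: the absolute hardware queue ID of the Nth hairpin queue is the chunk's start queue ID plus the logical index, and its doorbell address is the tail register base plus the index times the register spacing. A minimal sketch of that arithmetic, assuming the p2p_queue_chunks_info layout shown above; the wrapper names are illustrative and simply mirror the cpfl_hw_qid_get()/cpfl_hw_qtail_get() helpers added later in this series.

#include <stdint.h>

/* Illustrative only: absolute HW queue id of the Nth hairpin queue. */
static inline uint32_t
p2p_abs_qid(uint32_t start_qid, uint16_t logic_qid)
{
	return start_qid + logic_qid;
}

/* Illustrative only: doorbell (tail) register offset of the Nth queue. */
static inline uint64_t
p2p_qtail_offset(uint64_t qtail_start, uint16_t logic_qid, uint32_t qtail_spacing)
{
	return qtail_start + (uint64_t)logic_qid * qtail_spacing;
}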
* [PATCH v6 04/13] net/cpfl: support hairpin queue capability get
2023-05-31 10:25 ` [PATCH v6 00/13] net/cpfl: add hairpin queue support beilei.xing
` (2 preceding siblings ...)
2023-05-31 10:25 ` [PATCH v6 03/13] net/cpfl: add hairpin queue group during vport init beilei.xing
@ 2023-05-31 10:25 ` beilei.xing
2023-05-31 10:25 ` [PATCH v6 05/13] net/cpfl: support hairpin queue setup and release beilei.xing
` (9 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:25 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch adds hairpin_cap_get ops support.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 18 ++++++++++++++++++
drivers/net/cpfl/cpfl_rxtx.h | 3 +++
2 files changed, 21 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index c1273a7478..40b4515539 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -154,6 +154,23 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
return rte_eth_linkstatus_set(dev, &new_link);
}
+static int
+cpfl_hairpin_cap_get(struct rte_eth_dev *dev,
+ struct rte_eth_hairpin_cap *cap)
+{
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+
+ if (cpfl_vport->p2p_q_chunks_info == NULL)
+ return -ENOTSUP;
+
+ cap->max_nb_queues = CPFL_MAX_P2P_NB_QUEUES;
+ cap->max_rx_2_tx = CPFL_MAX_HAIRPINQ_RX_2_TX;
+ cap->max_tx_2_rx = CPFL_MAX_HAIRPINQ_TX_2_RX;
+ cap->max_nb_desc = CPFL_MAX_HAIRPINQ_NB_DESC;
+
+ return 0;
+}
+
static int
cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
@@ -904,6 +921,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.xstats_get = cpfl_dev_xstats_get,
.xstats_get_names = cpfl_dev_xstats_get_names,
.xstats_reset = cpfl_dev_xstats_reset,
+ .hairpin_cap_get = cpfl_hairpin_cap_get,
};
static int
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 1fe65778f0..a4a164d462 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -14,6 +14,9 @@
#define CPFL_MAX_RING_DESC 4096
#define CPFL_DMA_MEM_ALIGN 4096
+#define CPFL_MAX_HAIRPINQ_RX_2_TX 1
+#define CPFL_MAX_HAIRPINQ_TX_2_RX 1
+#define CPFL_MAX_HAIRPINQ_NB_DESC 1024
#define CPFL_MAX_P2P_NB_QUEUES 16
#define CPFL_P2P_NB_RX_BUFQ 1
#define CPFL_P2P_NB_TX_COMPLQ 1
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
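[Editor's note] From an application's point of view, this ops is reached through the generic ethdev call. A minimal sketch of querying the limits before any hairpin queue is set up, assuming port_id is a probed cpfl port:

#include <stdio.h>
#include <rte_ethdev.h>

/* Returns 0 on success; the cpfl PMD returns -ENOTSUP when the vport
 * has no P2P queue group.
 */
static int
query_hairpin_caps(uint16_t port_id)
{
	struct rte_eth_hairpin_cap cap;
	int ret;

	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
	if (ret != 0)
		return ret;
	printf("max hairpin queues %u, max desc %u\n",
	       cap.max_nb_queues, cap.max_nb_desc);
	return 0;
}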
* [PATCH v6 05/13] net/cpfl: support hairpin queue setup and release
2023-05-31 10:25 ` [PATCH v6 00/13] net/cpfl: add hairpin queue support beilei.xing
` (3 preceding siblings ...)
2023-05-31 10:25 ` [PATCH v6 04/13] net/cpfl: support hairpin queue capability get beilei.xing
@ 2023-05-31 10:25 ` beilei.xing
2023-05-31 10:25 ` [PATCH v6 06/13] common/idpf: add queue config API beilei.xing
` (8 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:25 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
Support hairpin Rx/Tx queue setup and release.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 6 +
drivers/net/cpfl/cpfl_ethdev.h | 11 +
drivers/net/cpfl/cpfl_rxtx.c | 364 +++++++++++++++++++++++-
drivers/net/cpfl/cpfl_rxtx.h | 36 +++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 4 +
5 files changed, 420 insertions(+), 1 deletion(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 40b4515539..b17c538ec2 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -879,6 +879,10 @@ cpfl_dev_close(struct rte_eth_dev *dev)
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
+ if (cpfl_vport->p2p_mp) {
+ rte_mempool_free(cpfl_vport->p2p_mp);
+ cpfl_vport->p2p_mp = NULL;
+ }
if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
cpfl_p2p_queue_grps_del(vport);
@@ -922,6 +926,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.xstats_get_names = cpfl_dev_xstats_get_names,
.xstats_reset = cpfl_dev_xstats_reset,
.hairpin_cap_get = cpfl_hairpin_cap_get,
+ .rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
+ .tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
};
static int
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 666d46a44a..2e42354f70 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -89,6 +89,17 @@ struct p2p_queue_chunks_info {
struct cpfl_vport {
struct idpf_vport base;
struct p2p_queue_chunks_info *p2p_q_chunks_info;
+
+ struct rte_mempool *p2p_mp;
+
+ uint16_t nb_data_rxq;
+ uint16_t nb_data_txq;
+ uint16_t nb_p2p_rxq;
+ uint16_t nb_p2p_txq;
+
+ struct idpf_rx_queue *p2p_rx_bufq;
+ struct idpf_tx_queue *p2p_tx_complq;
+ bool p2p_manual_bind;
};
struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 04a51b8d15..90b408d1f4 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -10,6 +10,67 @@
#include "cpfl_rxtx.h"
#include "cpfl_rxtx_vec_common.h"
+static inline void
+cpfl_tx_hairpin_descq_reset(struct idpf_tx_queue *txq)
+{
+ uint32_t i, size;
+
+ if (!txq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+ return;
+ }
+
+ size = txq->nb_tx_desc * CPFL_P2P_DESC_LEN;
+ for (i = 0; i < size; i++)
+ ((volatile char *)txq->desc_ring)[i] = 0;
+}
+
+static inline void
+cpfl_tx_hairpin_complq_reset(struct idpf_tx_queue *cq)
+{
+ uint32_t i, size;
+
+ if (!cq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to complq is NULL");
+ return;
+ }
+
+ size = cq->nb_tx_desc * CPFL_P2P_DESC_LEN;
+ for (i = 0; i < size; i++)
+ ((volatile char *)cq->compl_ring)[i] = 0;
+}
+
+static inline void
+cpfl_rx_hairpin_descq_reset(struct idpf_rx_queue *rxq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxq)
+ return;
+
+ len = rxq->nb_rx_desc;
+ for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
+ ((volatile char *)rxq->rx_ring)[i] = 0;
+}
+
+static inline void
+cpfl_rx_hairpin_bufq_reset(struct idpf_rx_queue *rxbq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxbq)
+ return;
+
+ len = rxbq->nb_rx_desc;
+ for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
+ ((volatile char *)rxbq->rx_ring)[i] = 0;
+
+ rxbq->bufq1 = NULL;
+ rxbq->bufq2 = NULL;
+}
+
static uint64_t
cpfl_rx_offload_convert(uint64_t offload)
{
@@ -234,7 +295,10 @@ cpfl_rx_queue_release(void *rxq)
/* Split queue */
if (!q->adapter->is_rx_singleq) {
- if (q->bufq2)
+ /* The mz is shared between Tx/Rx hairpin queues, so let the Rx release
+ * free the buf, q->bufq1->mz and q->mz.
+ */
+ if (!cpfl_rxq->hairpin_info.hairpin_q && q->bufq2)
cpfl_rx_split_bufq_release(q->bufq2);
if (q->bufq1)
@@ -385,6 +449,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
}
+ cpfl_vport->nb_data_rxq++;
rxq->q_set = true;
dev->data->rx_queues[queue_idx] = cpfl_rxq;
@@ -548,6 +613,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
queue_idx * vport->chunks_info.tx_qtail_spacing);
txq->ops = &def_txq_ops;
+ cpfl_vport->nb_data_txq++;
txq->q_set = true;
dev->data->tx_queues[queue_idx] = cpfl_txq;
@@ -562,6 +628,300 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return ret;
}
+static int
+cpfl_rx_hairpin_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,
+ uint16_t logic_qid, uint16_t nb_desc)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct rte_mempool *mp;
+ char pool_name[RTE_MEMPOOL_NAMESIZE];
+
+ mp = cpfl_vport->p2p_mp;
+ if (!mp) {
+ snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "p2p_mb_pool_%u",
+ dev->data->port_id);
+ mp = rte_pktmbuf_pool_create(pool_name, CPFL_P2P_NB_MBUF * CPFL_MAX_P2P_NB_QUEUES,
+ CPFL_P2P_CACHE_SIZE, 0, CPFL_P2P_MBUF_SIZE,
+ dev->device->numa_node);
+ if (!mp) {
+ PMD_INIT_LOG(ERR, "Failed to allocate mbuf pool for p2p");
+ return -ENOMEM;
+ }
+ cpfl_vport->p2p_mp = mp;
+ }
+
+ bufq->mp = mp;
+ bufq->nb_rx_desc = nb_desc;
+ bufq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_buf_start_qid,
+ logic_qid);
+ bufq->port_id = dev->data->port_id;
+ bufq->adapter = adapter;
+ bufq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
+
+ bufq->q_set = true;
+ bufq->ops = &def_rxq_ops;
+
+ return 0;
+}
+
+int
+cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf)
+{
+ struct cpfl_vport *cpfl_vport = (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter_base = vport->adapter;
+ uint16_t logic_qid = cpfl_vport->nb_p2p_rxq;
+ struct cpfl_rxq_hairpin_info *hairpin_info;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct idpf_rx_queue *bufq1 = NULL;
+ struct idpf_rx_queue *rxq;
+ uint16_t peer_port, peer_q;
+ uint16_t qid;
+ int ret;
+
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ PMD_INIT_LOG(ERR, "Only split queue model supports hairpin queue.");
+ return -EINVAL;
+ }
+
+ if (conf->peer_count != 1) {
+ PMD_INIT_LOG(ERR, "Can't support Rx hairpin queue peer count %d", conf->peer_count);
+ return -EINVAL;
+ }
+
+ peer_port = conf->peers[0].port;
+ peer_q = conf->peers[0].queue;
+
+ if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
+ nb_desc > CPFL_MAX_RING_DESC ||
+ nb_desc < CPFL_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is invalid", nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory if needed */
+ if (dev->data->rx_queues[queue_idx]) {
+ cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* Set up the Rx descriptor queue */
+ cpfl_rxq = rte_zmalloc_socket("cpfl hairpin rxq",
+ sizeof(struct cpfl_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!cpfl_rxq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
+ return -ENOMEM;
+ }
+
+ rxq = &cpfl_rxq->base;
+ hairpin_info = &cpfl_rxq->hairpin_info;
+ rxq->nb_rx_desc = nb_desc * 2;
+ rxq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid, logic_qid);
+ rxq->port_id = dev->data->port_id;
+ rxq->adapter = adapter_base;
+ rxq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
+ hairpin_info->hairpin_q = true;
+ hairpin_info->peer_txp = peer_port;
+ hairpin_info->peer_txq_id = peer_q;
+
+ if (conf->manual_bind != 0)
+ cpfl_vport->p2p_manual_bind = true;
+ else
+ cpfl_vport->p2p_manual_bind = false;
+
+ if (cpfl_vport->p2p_rx_bufq == NULL) {
+ bufq1 = rte_zmalloc_socket("hairpin rx bufq1",
+ sizeof(struct idpf_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!bufq1) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for hairpin Rx buffer queue 1.");
+ ret = -ENOMEM;
+ goto err_alloc_bufq1;
+ }
+ qid = 2 * logic_qid;
+ ret = cpfl_rx_hairpin_bufq_setup(dev, bufq1, qid, nb_desc);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to setup hairpin Rx buffer queue 1");
+ ret = -EINVAL;
+ goto err_setup_bufq1;
+ }
+ cpfl_vport->p2p_rx_bufq = bufq1;
+ }
+
+ rxq->bufq1 = cpfl_vport->p2p_rx_bufq;
+ rxq->bufq2 = NULL;
+
+ cpfl_vport->nb_p2p_rxq++;
+ rxq->q_set = true;
+ dev->data->rx_queues[queue_idx] = cpfl_rxq;
+
+ return 0;
+
+err_setup_bufq1:
+ rte_mempool_free(cpfl_vport->p2p_mp);
+ rte_free(bufq1);
+err_alloc_bufq1:
+ rte_free(cpfl_rxq);
+
+ return ret;
+}
+
+int
+cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter_base = vport->adapter;
+ uint16_t logic_qid = cpfl_vport->nb_p2p_txq;
+ struct cpfl_txq_hairpin_info *hairpin_info;
+ struct idpf_hw *hw = &adapter_base->hw;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct idpf_tx_queue *txq, *cq;
+ const struct rte_memzone *mz;
+ uint32_t ring_size;
+ uint16_t peer_port, peer_q;
+ int ret;
+
+ if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ PMD_INIT_LOG(ERR, "Only split queue model supports hairpin queue.");
+ return -EINVAL;
+ }
+
+ if (conf->peer_count != 1) {
+ PMD_INIT_LOG(ERR, "Can't support Tx hairpin queue peer count %d", conf->peer_count);
+ return -EINVAL;
+ }
+
+ peer_port = conf->peers[0].port;
+ peer_q = conf->peers[0].queue;
+
+ if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
+ nb_desc > CPFL_MAX_RING_DESC ||
+ nb_desc < CPFL_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is invalid",
+ nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory if needed. */
+ if (dev->data->tx_queues[queue_idx]) {
+ cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the TX queue data structure. */
+ cpfl_txq = rte_zmalloc_socket("cpfl hairpin txq",
+ sizeof(struct cpfl_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!cpfl_txq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+ return -ENOMEM;
+ }
+
+ txq = &cpfl_txq->base;
+ hairpin_info = &cpfl_txq->hairpin_info;
+ /* Txq ring length should be 2 times the Tx completion queue size. */
+ txq->nb_tx_desc = nb_desc * 2;
+ txq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_start_qid, logic_qid);
+ txq->port_id = dev->data->port_id;
+ hairpin_info->hairpin_q = true;
+ hairpin_info->peer_rxp = peer_port;
+ hairpin_info->peer_rxq_id = peer_q;
+
+ if (conf->manual_bind != 0)
+ cpfl_vport->p2p_manual_bind = true;
+ else
+ cpfl_vport->p2p_manual_bind = false;
+
+ /* The Tx hairpin queue always allocates the Tx HW ring */
+ ring_size = RTE_ALIGN(txq->nb_tx_desc * CPFL_P2P_DESC_LEN,
+ CPFL_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_ring", logic_qid,
+ ring_size + CPFL_P2P_RING_BUF,
+ CPFL_RING_BASE_ALIGN,
+ dev->device->numa_node);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
+ ret = -ENOMEM;
+ goto err_txq_mz_rsv;
+ }
+
+ txq->tx_ring_phys_addr = mz->iova;
+ txq->desc_ring = mz->addr;
+ txq->mz = mz;
+
+ cpfl_tx_hairpin_descq_reset(txq);
+ txq->qtx_tail = hw->hw_addr +
+ cpfl_hw_qtail_get(cpfl_vport->p2p_q_chunks_info->tx_qtail_start,
+ logic_qid, cpfl_vport->p2p_q_chunks_info->tx_qtail_spacing);
+ txq->ops = &def_txq_ops;
+
+ if (cpfl_vport->p2p_tx_complq == NULL) {
+ cq = rte_zmalloc_socket("cpfl hairpin cq",
+ sizeof(struct idpf_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ dev->device->numa_node);
+ if (!cq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+ ret = -ENOMEM;
+ goto err_cq_alloc;
+ }
+
+ cq->nb_tx_desc = nb_desc;
+ cq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_compl_start_qid,
+ 0);
+ cq->port_id = dev->data->port_id;
+
+ /* Tx completion queue always allocates the HW ring */
+ ring_size = RTE_ALIGN(cq->nb_tx_desc * CPFL_P2P_DESC_LEN,
+ CPFL_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_compl_ring", logic_qid,
+ ring_size + CPFL_P2P_RING_BUF,
+ CPFL_RING_BASE_ALIGN,
+ dev->device->numa_node);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX completion queue");
+ ret = -ENOMEM;
+ goto err_cq_mz_rsv;
+ }
+ cq->tx_ring_phys_addr = mz->iova;
+ cq->compl_ring = mz->addr;
+ cq->mz = mz;
+
+ cpfl_tx_hairpin_complq_reset(cq);
+ cpfl_vport->p2p_tx_complq = cq;
+ }
+
+ txq->complq = cpfl_vport->p2p_tx_complq;
+
+ cpfl_vport->nb_p2p_txq++;
+ txq->q_set = true;
+ dev->data->tx_queues[queue_idx] = cpfl_txq;
+
+ return 0;
+
+err_cq_mz_rsv:
+ rte_free(cq);
+err_cq_alloc:
+ cpfl_dma_zone_release(mz);
+err_txq_mz_rsv:
+ rte_free(cpfl_txq);
+ return ret;
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
@@ -865,6 +1225,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ continue;
(void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index a4a164d462..06198d4aad 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -22,6 +22,11 @@
#define CPFL_P2P_NB_TX_COMPLQ 1
#define CPFL_P2P_NB_QUEUE_GRPS 1
#define CPFL_P2P_QUEUE_GRP_ID 1
+#define CPFL_P2P_DESC_LEN 16
+#define CPFL_P2P_NB_MBUF 4096
+#define CPFL_P2P_CACHE_SIZE 250
+#define CPFL_P2P_MBUF_SIZE 2048
+#define CPFL_P2P_RING_BUF 128
/* Base address of the HW descriptor ring should be 128B aligned. */
#define CPFL_RING_BASE_ALIGN 128
@@ -33,14 +38,40 @@
#define CPFL_SUPPORT_CHAIN_NUM 5
+struct cpfl_rxq_hairpin_info {
+ bool hairpin_q; /* if rx queue is a hairpin queue */
+ uint16_t peer_txp;
+ uint16_t peer_txq_id;
+};
+
struct cpfl_rx_queue {
struct idpf_rx_queue base;
+ struct cpfl_rxq_hairpin_info hairpin_info;
+};
+
+struct cpfl_txq_hairpin_info {
+ bool hairpin_q; /* if tx queue is a hairpin queue */
+ uint16_t peer_rxp;
+ uint16_t peer_rxq_id;
};
struct cpfl_tx_queue {
struct idpf_tx_queue base;
+ struct cpfl_txq_hairpin_info hairpin_info;
};
+static inline uint16_t
+cpfl_hw_qid_get(uint16_t start_qid, uint16_t offset)
+{
+ return start_qid + offset;
+}
+
+static inline uint64_t
+cpfl_hw_qtail_get(uint64_t tail_start, uint16_t offset, uint64_t tail_spacing)
+{
+ return tail_start + offset * tail_spacing;
+}
+
int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
@@ -59,4 +90,9 @@ void cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void cpfl_set_rx_function(struct rte_eth_dev *dev);
void cpfl_set_tx_function(struct rte_eth_dev *dev);
+int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, const struct rte_eth_hairpin_conf *conf);
+int cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf);
#endif /* _CPFL_RXTX_H_ */
diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
index 5690b17911..d8e9191196 100644
--- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
+++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
@@ -85,6 +85,8 @@ cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)
cpfl_rxq = dev->data->rx_queues[i];
default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ continue;
splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base);
ret = splitq_ret && default_ret;
} else {
@@ -106,6 +108,8 @@ cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_tx_queues; i++) {
cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq->hairpin_info.hairpin_q)
+ continue;
ret = cpfl_tx_vec_queue_default(&cpfl_txq->base);
if (ret == CPFL_SCALAR_PATH)
return CPFL_SCALAR_PATH;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
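[Editor's note] For reference, the ethdev-level sequence that exercises this setup path looks roughly like the sketch below. The queue index 1 is an assumption (one normal data queue per direction at index 0, with hairpin queues following the data queues); a real application derives port and queue numbers from its own configuration.

#include <rte_ethdev.h>

/* Pair Rx hairpin queue 1 of rx_port with Tx hairpin queue 1 of tx_port. */
static int
setup_hairpin_pair(uint16_t rx_port, uint16_t tx_port, uint16_t nb_desc)
{
	struct rte_eth_hairpin_conf conf = { .peer_count = 1 };
	int ret;

	conf.peers[0].port = tx_port;
	conf.peers[0].queue = 1;	/* peer Tx hairpin queue index */
	ret = rte_eth_rx_hairpin_queue_setup(rx_port, 1, nb_desc, &conf);
	if (ret != 0)
		return ret;

	conf.peers[0].port = rx_port;
	conf.peers[0].queue = 1;	/* peer Rx hairpin queue index */
	return rte_eth_tx_hairpin_queue_setup(tx_port, 1, nb_desc, &conf);
}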
* [PATCH v6 06/13] common/idpf: add queue config API
2023-05-31 10:25 ` [PATCH v6 00/13] net/cpfl: add hairpin queue support beilei.xing
` (4 preceding siblings ...)
2023-05-31 10:25 ` [PATCH v6 05/13] net/cpfl: support hairpin queue setup and release beilei.xing
@ 2023-05-31 10:25 ` beilei.xing
2023-05-31 10:25 ` [PATCH v6 07/13] net/cpfl: support hairpin queue configuration beilei.xing
` (7 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:25 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds Rx/Tx queue configuration APIs (idpf_vc_rxq_config_by_info
and idpf_vc_txq_config_by_info) that take the queue info directly.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 70 ++++++++++++++++++++++
drivers/common/idpf/idpf_common_virtchnl.h | 6 ++
drivers/common/idpf/version.map | 2 +
3 files changed, 78 insertions(+)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index a3fe55c897..211b44a88e 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -1050,6 +1050,41 @@ idpf_vc_rxq_config(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
return err;
}
+int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct virtchnl2_rxq_info *rxq_info,
+ uint16_t num_qs)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+ struct idpf_cmd_info args;
+ int size, err, i;
+
+ size = sizeof(*vc_rxqs) + (num_qs - 1) *
+ sizeof(struct virtchnl2_rxq_info);
+ vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+ if (vc_rxqs == NULL) {
+ DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_rxqs->vport_id = vport->vport_id;
+ vc_rxqs->num_qinfo = num_qs;
+ memcpy(vc_rxqs->qinfo, rxq_info, num_qs * sizeof(struct virtchnl2_rxq_info));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+ args.in_args = (uint8_t *)vc_rxqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ rte_free(vc_rxqs);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+ return err;
+}
+
int
idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
{
@@ -1121,6 +1156,41 @@ idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
return err;
}
+int
+idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
+ uint16_t num_qs)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+ struct idpf_cmd_info args;
+ int size, err;
+
+ size = sizeof(*vc_txqs) + (num_qs - 1) * sizeof(struct virtchnl2_txq_info);
+ vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+ if (vc_txqs == NULL) {
+ DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_txqs->vport_id = vport->vport_id;
+ vc_txqs->num_qinfo = num_qs;
+ memcpy(vc_txqs->qinfo, txq_info, num_qs * sizeof(struct virtchnl2_txq_info));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+ args.in_args = (uint8_t *)vc_txqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ rte_free(vc_txqs);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+ return err;
+}
+
int
idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
struct idpf_ctlq_msg *q_msg)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index 58b16e1c5d..db83761a5e 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -65,6 +65,12 @@ __rte_internal
int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 *buff_count, struct idpf_dma_mem **buffs);
__rte_internal
+int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct virtchnl2_rxq_info *rxq_info,
+ uint16_t num_qs);
+__rte_internal
+int idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
+ uint16_t num_qs);
+__rte_internal
int idpf_vc_queue_grps_del(struct idpf_vport *vport,
uint16_t num_q_grps,
struct virtchnl2_queue_group_id *qg_ids);
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 01d18f3f3f..17e77884ce 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -54,8 +54,10 @@ INTERNAL {
idpf_vc_rss_lut_get;
idpf_vc_rss_lut_set;
idpf_vc_rxq_config;
+ idpf_vc_rxq_config_by_info;
idpf_vc_stats_query;
idpf_vc_txq_config;
+ idpf_vc_txq_config_by_info;
idpf_vc_vectors_alloc;
idpf_vc_vectors_dealloc;
idpf_vc_vport_create;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
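[Editor's note] The difference from the existing idpf_vc_rxq_config()/idpf_vc_txq_config() is that the caller hands over an already-filled virtchnl2_rxq_info/virtchnl2_txq_info array and a single message covers num_qs queues. A minimal caller-side sketch with one Rx queue; the numeric values are placeholders, and the real field settings are the ones the cpfl hairpin patches later in this series use. It assumes compilation inside the idpf driver tree where the headers below exist.

#include "idpf_common_virtchnl.h"	/* idpf_vc_rxq_config_by_info() */

/* Configure one Rx queue through the new by-info API; values are
 * placeholders, not the driver's actual settings.
 */
static int
config_one_rxq(struct idpf_vport *vport, uint32_t qid,
	       uint64_t ring_addr, uint16_t ring_len)
{
	struct virtchnl2_rxq_info rxq_info[1] = {0};

	rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX;
	rxq_info[0].queue_id = qid;
	rxq_info[0].ring_len = ring_len;
	rxq_info[0].dma_ring_addr = ring_addr;
	rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;

	return idpf_vc_rxq_config_by_info(vport, rxq_info, 1);
}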
* [PATCH v6 07/13] net/cpfl: support hairpin queue configuration
2023-05-31 10:25 ` [PATCH v6 00/13] net/cpfl: add hairpin queue support beilei.xing
` (5 preceding siblings ...)
2023-05-31 10:25 ` [PATCH v6 06/13] common/idpf: add queue config API beilei.xing
@ 2023-05-31 10:25 ` beilei.xing
2023-05-31 10:25 ` [PATCH v6 08/13] common/idpf: add switch queue API beilei.xing
` (6 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:25 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports Rx/Tx hairpin queue configuration.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 136 +++++++++++++++++++++++++++++++--
drivers/net/cpfl/cpfl_rxtx.c | 80 +++++++++++++++++++
drivers/net/cpfl/cpfl_rxtx.h | 7 ++
3 files changed, 217 insertions(+), 6 deletions(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index b17c538ec2..a06def06d0 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -742,33 +742,157 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
return idpf_vport_irq_map_config(vport, nb_rx_queues);
}
+/* Update hairpin_info for dev's tx hairpin queue */
+static int
+cpfl_txq_hairpin_info_update(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+ struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+ struct cpfl_txq_hairpin_info *hairpin_info;
+ struct cpfl_tx_queue *cpfl_txq;
+ int i;
+
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ hairpin_info = &cpfl_txq->hairpin_info;
+ if (hairpin_info->peer_rxp != rx_port) {
+ PMD_DRV_LOG(ERR, "port %d is not the peer port", rx_port);
+ return -EINVAL;
+ }
+ hairpin_info->peer_rxq_id =
+ cpfl_hw_qid_get(cpfl_rx_vport->p2p_q_chunks_info->rx_start_qid,
+ hairpin_info->peer_rxq_id - cpfl_rx_vport->nb_data_rxq);
+ }
+
+ return 0;
+}
+
+/* Bind Rx hairpin queue's memory zone to peer Tx hairpin queue's memory zone */
+static void
+cpfl_rxq_hairpin_mz_bind(struct rte_eth_dev *dev)
+{
+ struct cpfl_vport *cpfl_rx_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_rx_vport->base;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_hw *hw = &adapter->hw;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct rte_eth_dev *peer_dev;
+ const struct rte_memzone *mz;
+ uint16_t peer_tx_port;
+ uint16_t peer_tx_qid;
+ int i;
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ peer_tx_port = cpfl_rxq->hairpin_info.peer_txp;
+ peer_tx_qid = cpfl_rxq->hairpin_info.peer_txq_id;
+ peer_dev = &rte_eth_devices[peer_tx_port];
+ cpfl_txq = peer_dev->data->tx_queues[peer_tx_qid];
+
+ /* bind rx queue */
+ mz = cpfl_txq->base.mz;
+ cpfl_rxq->base.rx_ring_phys_addr = mz->iova;
+ cpfl_rxq->base.rx_ring = mz->addr;
+ cpfl_rxq->base.mz = mz;
+
+ /* bind rx buffer queue */
+ mz = cpfl_txq->base.complq->mz;
+ cpfl_rxq->base.bufq1->rx_ring_phys_addr = mz->iova;
+ cpfl_rxq->base.bufq1->rx_ring = mz->addr;
+ cpfl_rxq->base.bufq1->mz = mz;
+ cpfl_rxq->base.bufq1->qrx_tail = hw->hw_addr +
+ cpfl_hw_qtail_get(cpfl_rx_vport->p2p_q_chunks_info->rx_buf_qtail_start,
+ 0, cpfl_rx_vport->p2p_q_chunks_info->rx_buf_qtail_spacing);
+ }
+}
+
static int
cpfl_start_queues(struct rte_eth_dev *dev)
{
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_rx_queue *cpfl_rxq;
struct cpfl_tx_queue *cpfl_txq;
+ int update_flag = 0;
int err = 0;
int i;
+ /* For normal data queues, configure, init and enable Txq.
+ * For non-manual bind hairpin queues, configure Txq.
+ */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
cpfl_txq = dev->data->tx_queues[i];
if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
continue;
- err = cpfl_tx_queue_start(dev, i);
+ if (!cpfl_txq->hairpin_info.hairpin_q) {
+ err = cpfl_tx_queue_start(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+ return err;
+ }
+ } else if (!cpfl_vport->p2p_manual_bind) {
+ if (update_flag == 0) {
+ err = cpfl_txq_hairpin_info_update(dev,
+ cpfl_txq->hairpin_info.peer_rxp);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info");
+ return err;
+ }
+ update_flag = 1;
+ }
+ err = cpfl_hairpin_txq_config(vport, cpfl_txq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+ return err;
+ }
+ }
+ }
+
+ /* For non-manual bind hairpin queues, configure Tx completion queue first.*/
+ if (!cpfl_vport->p2p_manual_bind && cpfl_vport->p2p_tx_complq != NULL) {
+ err = cpfl_hairpin_tx_complq_config(cpfl_vport);
if (err != 0) {
- PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+ PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
return err;
}
}
+ /* For non-manual bind hairpin queues, configure Rx buffer queue.*/
+ if (!cpfl_vport->p2p_manual_bind && cpfl_vport->p2p_rx_bufq != NULL) {
+ cpfl_rxq_hairpin_mz_bind(dev);
+ err = cpfl_hairpin_rx_bufq_config(cpfl_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
+ return err;
+ }
+ }
+
+ /* For normal data queues, configure, init and enable Rxq.
+ * For non-manual bind hairpin queues, configure Rxq, and then init Rxq.
+ */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
continue;
- err = cpfl_rx_queue_start(dev, i);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
- return err;
+ if (!cpfl_rxq->hairpin_info.hairpin_q) {
+ err = cpfl_rx_queue_start(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
+ return err;
+ }
+ } else if (!cpfl_vport->p2p_manual_bind) {
+ err = cpfl_hairpin_rxq_config(vport, cpfl_rxq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
+ return err;
+ }
+ err = cpfl_rx_queue_init(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+ return err;
+ }
}
}
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 90b408d1f4..9408c6e1a4 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -922,6 +922,86 @@ cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return ret;
}
+int
+cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport)
+{
+ struct idpf_rx_queue *rx_bufq = cpfl_vport->p2p_rx_bufq;
+ struct virtchnl2_rxq_info rxq_info[1] = {0};
+
+ rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ rxq_info[0].queue_id = rx_bufq->queue_id;
+ rxq_info[0].ring_len = rx_bufq->nb_rx_desc;
+ rxq_info[0].dma_ring_addr = rx_bufq->rx_ring_phys_addr;
+ rxq_info[0].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info[0].rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
+ rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ rxq_info[0].data_buffer_size = rx_bufq->rx_buf_len;
+ rxq_info[0].buffer_notif_stride = CPFL_RX_BUF_STRIDE;
+
+ return idpf_vc_rxq_config_by_info(&cpfl_vport->base, rxq_info, 1);
+}
+
+int
+cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq)
+{
+ struct virtchnl2_rxq_info rxq_info[1] = {0};
+ struct idpf_rx_queue *rxq = &cpfl_rxq->base;
+
+ rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX;
+ rxq_info[0].queue_id = rxq->queue_id;
+ rxq_info[0].ring_len = rxq->nb_rx_desc;
+ rxq_info[0].dma_ring_addr = rxq->rx_ring_phys_addr;
+ rxq_info[0].rx_bufq1_id = rxq->bufq1->queue_id;
+ rxq_info[0].max_pkt_size = vport->max_pkt_len;
+ rxq_info[0].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info[0].qflags |= VIRTCHNL2_RX_DESC_SIZE_16BYTE;
+
+ rxq_info[0].data_buffer_size = rxq->rx_buf_len;
+ rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ rxq_info[0].rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
+
+ PMD_DRV_LOG(NOTICE, "hairpin: vport %u, Rxq id 0x%x",
+ vport->vport_id, rxq_info[0].queue_id);
+
+ return idpf_vc_rxq_config_by_info(vport, rxq_info, 1);
+}
+
+int
+cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport)
+{
+ struct idpf_tx_queue *tx_complq = cpfl_vport->p2p_tx_complq;
+ struct virtchnl2_txq_info txq_info[1] = {0};
+
+ txq_info[0].dma_ring_addr = tx_complq->tx_ring_phys_addr;
+ txq_info[0].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ txq_info[0].queue_id = tx_complq->queue_id;
+ txq_info[0].ring_len = tx_complq->nb_tx_desc;
+ txq_info[0].peer_rx_queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+ txq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info[0].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+
+ return idpf_vc_txq_config_by_info(&cpfl_vport->base, txq_info, 1);
+}
+
+int
+cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq)
+{
+ struct idpf_tx_queue *txq = &cpfl_txq->base;
+ struct virtchnl2_txq_info txq_info[1] = {0};
+
+ txq_info[0].dma_ring_addr = txq->tx_ring_phys_addr;
+ txq_info[0].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ txq_info[0].queue_id = txq->queue_id;
+ txq_info[0].ring_len = txq->nb_tx_desc;
+ txq_info[0].tx_compl_queue_id = txq->complq->queue_id;
+ txq_info[0].relative_queue_id = txq->queue_id;
+ txq_info[0].peer_rx_queue_id = cpfl_txq->hairpin_info.peer_rxq_id;
+ txq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info[0].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+
+ return idpf_vc_txq_config_by_info(vport, txq_info, 1);
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 06198d4aad..872ebc1bfd 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -32,12 +32,15 @@
#define CPFL_RING_BASE_ALIGN 128
#define CPFL_DEFAULT_RX_FREE_THRESH 32
+#define CPFL_RXBUF_LOW_WATERMARK 64
#define CPFL_DEFAULT_TX_RS_THRESH 32
#define CPFL_DEFAULT_TX_FREE_THRESH 32
#define CPFL_SUPPORT_CHAIN_NUM 5
+#define CPFL_RX_BUF_STRIDE 64
+
struct cpfl_rxq_hairpin_info {
bool hairpin_q; /* if rx queue is a hairpin queue */
uint16_t peer_txp;
@@ -95,4 +98,8 @@ int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
int cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc,
const struct rte_eth_hairpin_conf *conf);
+int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport);
+int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq);
+int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
+int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
#endif /* _CPFL_RXTX_H_ */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
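[Editor's note] A note on the design choice in cpfl_rxq_hairpin_mz_bind() above: the Rx hairpin queue does not reserve its own DMA rings, it reuses the memzones that the peer Tx hairpin queue and Tx completion queue already reserved, so both ends of the hairpin reference the same descriptor memory. A stripped-down sketch of that binding, assuming the idpf queue structures from the diff; this is not the driver function itself.

#include "idpf_common_rxtx.h"	/* struct idpf_rx_queue / idpf_tx_queue */

/* Point an Rx hairpin queue and its buffer queue at the DMA rings
 * reserved by the peer Tx hairpin queue and Tx completion queue.
 */
static void
bind_rx_rings_to_peer_tx(struct idpf_rx_queue *rxq,
			 struct idpf_tx_queue *peer_txq)
{
	rxq->rx_ring_phys_addr = peer_txq->mz->iova;
	rxq->rx_ring = peer_txq->mz->addr;
	rxq->mz = peer_txq->mz;

	rxq->bufq1->rx_ring_phys_addr = peer_txq->complq->mz->iova;
	rxq->bufq1->rx_ring = peer_txq->complq->mz->addr;
	rxq->bufq1->mz = peer_txq->complq->mz;
}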
* [PATCH v6 08/13] common/idpf: add switch queue API
2023-05-31 10:25 ` [PATCH v6 00/13] net/cpfl: add hairpin queue support beilei.xing
` (6 preceding siblings ...)
2023-05-31 10:25 ` [PATCH v6 07/13] net/cpfl: support hairpin queue configuration beilei.xing
@ 2023-05-31 10:25 ` beilei.xing
2023-05-31 10:25 ` [PATCH v6 09/13] net/cpfl: support hairpin queue start/stop beilei.xing
` (5 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:25 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch exposes the idpf_vc_ena_dis_one_queue API so that net drivers
can enable or disable a single queue.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 2 +-
drivers/common/idpf/idpf_common_virtchnl.h | 3 +++
drivers/common/idpf/version.map | 1 +
3 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index 211b44a88e..6455f640da 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -733,7 +733,7 @@ idpf_vc_vectors_dealloc(struct idpf_vport *vport)
return err;
}
-static int
+int
idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
uint32_t type, bool on)
{
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index db83761a5e..9ff5c38c26 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -71,6 +71,9 @@ __rte_internal
int idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
uint16_t num_qs);
__rte_internal
+int idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
+ uint32_t type, bool on);
+__rte_internal
int idpf_vc_queue_grps_del(struct idpf_vport *vport,
uint16_t num_q_grps,
struct virtchnl2_queue_group_id *qg_ids);
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 17e77884ce..25624732b0 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -40,6 +40,7 @@ INTERNAL {
idpf_vc_cmd_execute;
idpf_vc_ctlq_post_rx_buffs;
idpf_vc_ctlq_recv;
+ idpf_vc_ena_dis_one_queue;
idpf_vc_irq_map_unmap_config;
idpf_vc_one_msg_read;
idpf_vc_ptype_info_query;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v6 09/13] net/cpfl: support hairpin queue start/stop
2023-05-31 10:25 ` [PATCH v6 00/13] net/cpfl: add hairpin queue support beilei.xing
` (7 preceding siblings ...)
2023-05-31 10:25 ` [PATCH v6 08/13] common/idpf: add switch queue API beilei.xing
@ 2023-05-31 10:25 ` beilei.xing
2023-05-31 10:25 ` [PATCH v6 10/13] common/idpf: add irq map config API beilei.xing
` (4 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:25 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports Rx/Tx hairpin queue start/stop.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 46 +++++++++
drivers/net/cpfl/cpfl_rxtx.c | 164 +++++++++++++++++++++++++++++----
drivers/net/cpfl/cpfl_rxtx.h | 15 +++
3 files changed, 207 insertions(+), 18 deletions(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index a06def06d0..2b99e58341 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -896,6 +896,52 @@ cpfl_start_queues(struct rte_eth_dev *dev)
}
}
+ /* For non-manual bind hairpin queues, enable Tx queue and Rx queue,
+ * then enable Tx completion queue and Rx buffer queue.
+ */
+ for (i = cpfl_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) {
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ i - cpfl_vport->nb_data_txq,
+ false, true);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on",
+ i);
+ else
+ cpfl_txq->base.q_started = true;
+ }
+ }
+
+ for (i = cpfl_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) {
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ i - cpfl_vport->nb_data_rxq,
+ true, true);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on",
+ i);
+ else
+ cpfl_rxq->base.q_started = true;
+ }
+ }
+
+ if (!cpfl_vport->p2p_manual_bind &&
+ cpfl_vport->p2p_tx_complq != NULL &&
+ cpfl_vport->p2p_rx_bufq != NULL) {
+ err = cpfl_switch_hairpin_complq(cpfl_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq");
+ return err;
+ }
+ err = cpfl_switch_hairpin_bufq(cpfl_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Rx bufq");
+ return err;
+ }
+ }
+
return err;
}
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 9408c6e1a4..8d1f8a560b 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -1002,6 +1002,89 @@ cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq
return idpf_vc_txq_config_by_info(vport, txq_info, 1);
}
+int
+cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ queue_id = cpfl_vport->p2p_tx_complq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+ return err;
+}
+
+int
+cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+ return err;
+}
+
+int
+cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t logic_qid,
+ bool rx, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = rx ? VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;
+
+ if (type == VIRTCHNL2_QUEUE_TYPE_RX)
+ queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid, logic_qid);
+ else
+ queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_start_qid, logic_qid);
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+ if (err)
+ return err;
+
+ return err;
+}
+
+static int
+cpfl_alloc_split_p2p_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+ volatile struct virtchnl2_p2p_rx_buf_desc *rxd;
+ struct rte_mbuf *mbuf = NULL;
+ uint64_t dma_addr;
+ uint16_t i;
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ mbuf = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!mbuf)) {
+ PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+ return -ENOMEM;
+ }
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+ rxd = &((volatile struct virtchnl2_p2p_rx_buf_desc *)(rxq->rx_ring))[i];
+ rxd->reserve0 = 0;
+ rxd->pkt_addr = dma_addr;
+ }
+
+ rxq->nb_rx_hold = 0;
+ /* The value written in the RX buffer queue tail register must be a multiple of 8. */
+ rxq->rx_tail = rxq->nb_rx_desc - CPFL_HAIRPIN_Q_TAIL_AUX_VALUE;
+
+ return 0;
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
@@ -1055,22 +1138,31 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
IDPF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
} else {
/* Split queue */
- err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
- return err;
- }
- err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
- return err;
+ if (cpfl_rxq->hairpin_info.hairpin_q) {
+ err = cpfl_alloc_split_p2p_rxq_mbufs(rxq->bufq1);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate p2p RX buffer queue mbuf");
+ return err;
+ }
+ } else {
+ err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+ return err;
+ }
+ err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+ return err;
+ }
}
rte_wmb();
/* Init the RX tail register. */
IDPF_PCI_REG_WRITE(rxq->bufq1->qrx_tail, rxq->bufq1->rx_tail);
- IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
+ if (rxq->bufq2)
+ IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
}
return err;
@@ -1177,7 +1269,12 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return -EINVAL;
cpfl_rxq = dev->data->rx_queues[rx_queue_id];
- err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ rx_queue_id - cpfl_vport->nb_data_txq,
+ true, false);
+ else
+ err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
rx_queue_id);
@@ -1191,10 +1288,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
idpf_qc_single_rx_queue_reset(rxq);
} else {
rxq->bufq1->ops->release_mbufs(rxq->bufq1);
- rxq->bufq2->ops->release_mbufs(rxq->bufq2);
- idpf_qc_split_rx_queue_reset(rxq);
+ if (rxq->bufq2)
+ rxq->bufq2->ops->release_mbufs(rxq->bufq2);
+ if (cpfl_rxq->hairpin_info.hairpin_q) {
+ cpfl_rx_hairpin_descq_reset(rxq);
+ cpfl_rx_hairpin_bufq_reset(rxq->bufq1);
+ } else {
+ idpf_qc_split_rx_queue_reset(rxq);
+ }
}
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ if (!cpfl_rxq->hairpin_info.hairpin_q)
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1213,7 +1317,12 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
cpfl_txq = dev->data->tx_queues[tx_queue_id];
- err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
+ if (cpfl_txq->hairpin_info.hairpin_q)
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ tx_queue_id - cpfl_vport->nb_data_txq,
+ false, false);
+ else
+ err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
tx_queue_id);
@@ -1226,10 +1335,17 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
idpf_qc_single_tx_queue_reset(txq);
} else {
- idpf_qc_split_tx_descq_reset(txq);
- idpf_qc_split_tx_complq_reset(txq->complq);
+ if (cpfl_txq->hairpin_info.hairpin_q) {
+ cpfl_tx_hairpin_descq_reset(txq);
+ cpfl_tx_hairpin_complq_reset(txq->complq);
+ } else {
+ idpf_qc_split_tx_descq_reset(txq);
+ idpf_qc_split_tx_complq_reset(txq->complq);
+ }
}
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ if (!cpfl_txq->hairpin_info.hairpin_q)
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1249,10 +1365,22 @@ cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
void
cpfl_stop_queues(struct rte_eth_dev *dev)
{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
struct cpfl_rx_queue *cpfl_rxq;
struct cpfl_tx_queue *cpfl_txq;
int i;
+ if (cpfl_vport->p2p_tx_complq != NULL) {
+ if (cpfl_switch_hairpin_complq(cpfl_vport, false) != 0)
+ PMD_DRV_LOG(ERR, "Failed to stop hairpin Tx complq");
+ }
+
+ if (cpfl_vport->p2p_rx_bufq != NULL) {
+ if (cpfl_switch_hairpin_bufq(cpfl_vport, false) != 0)
+ PMD_DRV_LOG(ERR, "Failed to stop hairpin Rx bufq");
+ }
+
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
if (cpfl_rxq == NULL)
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 872ebc1bfd..aacd087b56 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -41,6 +41,17 @@
#define CPFL_RX_BUF_STRIDE 64
+/* The value written in the RX buffer queue tail register,
+ * and in WritePTR field in the TX completion queue context,
+ * must be a multiple of 8.
+ */
+#define CPFL_HAIRPIN_Q_TAIL_AUX_VALUE 8
+
+struct virtchnl2_p2p_rx_buf_desc {
+ __le64 reserve0;
+ __le64 pkt_addr; /* Packet buffer address */
+};
+
struct cpfl_rxq_hairpin_info {
bool hairpin_q; /* if rx queue is a hairpin queue */
uint16_t peer_txp;
@@ -102,4 +113,8 @@ int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq);
int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
+int cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t qid,
+ bool rx, bool on);
#endif /* _CPFL_RXTX_H_ */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
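[Editor's note] When the queues were set up with manual_bind, the enabling done in cpfl_start_queues() above is skipped and the application is expected to wire the two ports itself once both are started; the hairpin_bind ops added at the end of this series backs that call. A rough ethdev-level sketch, assuming the two-port hairpin pair created earlier in these notes:

#include <rte_ethdev.h>

static int
start_and_bind(uint16_t rx_port, uint16_t tx_port)
{
	int ret;

	ret = rte_eth_dev_start(tx_port);
	if (ret != 0)
		return ret;
	ret = rte_eth_dev_start(rx_port);
	if (ret != 0)
		return ret;

	/* Bind the Tx side to the Rx side; bind both directions for
	 * a bidirectional hairpin.
	 */
	return rte_eth_hairpin_bind(tx_port, rx_port);
}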
* [PATCH v6 10/13] common/idpf: add irq map config API
2023-05-31 10:25 ` [PATCH v6 00/13] net/cpfl: add hairpin queue support beilei.xing
` (8 preceding siblings ...)
2023-05-31 10:25 ` [PATCH v6 09/13] net/cpfl: support hairpin queue start/stop beilei.xing
@ 2023-05-31 10:25 ` beilei.xing
2023-05-31 10:25 ` [PATCH v6 11/13] net/cpfl: enable write back based on ITR expire beilei.xing
` (3 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:25 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds the idpf_vport_irq_map_config_by_qids API.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_device.c | 75 ++++++++++++++++++++++++
drivers/common/idpf/idpf_common_device.h | 4 ++
drivers/common/idpf/version.map | 1 +
3 files changed, 80 insertions(+)
diff --git a/drivers/common/idpf/idpf_common_device.c b/drivers/common/idpf/idpf_common_device.c
index dc47551b17..cc4207a46e 100644
--- a/drivers/common/idpf/idpf_common_device.c
+++ b/drivers/common/idpf/idpf_common_device.c
@@ -667,6 +667,81 @@ idpf_vport_irq_map_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
return ret;
}
+int
+idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport, uint32_t *qids, uint16_t nb_rx_queues)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_queue_vector *qv_map;
+ struct idpf_hw *hw = &adapter->hw;
+ uint32_t dynctl_val, itrn_val;
+ uint32_t dynctl_reg_start;
+ uint32_t itrn_reg_start;
+ uint16_t i;
+ int ret;
+
+ qv_map = rte_zmalloc("qv_map",
+ nb_rx_queues *
+ sizeof(struct virtchnl2_queue_vector), 0);
+ if (qv_map == NULL) {
+ DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
+ nb_rx_queues);
+ ret = -ENOMEM;
+ goto qv_map_alloc_err;
+ }
+
+ /* Rx interrupt disabled, Map interrupt only for writeback */
+
+ /* The capability flags adapter->caps.other_caps should be
+ * compared with bit VIRTCHNL2_CAP_WB_ON_ITR here. The if
+ * condition should be updated when the FW can return the
+ * correct flag bits.
+ */
+ dynctl_reg_start =
+ vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
+ itrn_reg_start =
+ vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
+ dynctl_val = IDPF_READ_REG(hw, dynctl_reg_start);
+ DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x", dynctl_val);
+ itrn_val = IDPF_READ_REG(hw, itrn_reg_start);
+ DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
+ /* Force write-backs by setting WB_ON_ITR bit in DYN_CTL
+ * register. WB_ON_ITR and INTENA are mutually exclusive
+ * bits. Setting WB_ON_ITR bits means TX and RX Descs
+ * are written back based on ITR expiration irrespective
+ * of INTENA setting.
+ */
+ /* TBD: need to tune INTERVAL value for better performance. */
+ itrn_val = (itrn_val == 0) ? IDPF_DFLT_INTERVAL : itrn_val;
+ dynctl_val = VIRTCHNL2_ITR_IDX_0 <<
+ PF_GLINT_DYN_CTL_ITR_INDX_S |
+ PF_GLINT_DYN_CTL_WB_ON_ITR_M |
+ itrn_val << PF_GLINT_DYN_CTL_INTERVAL_S;
+ IDPF_WRITE_REG(hw, dynctl_reg_start, dynctl_val);
+
+ for (i = 0; i < nb_rx_queues; i++) {
+ /* map all queues to the same vector */
+ qv_map[i].queue_id = qids[i];
+ qv_map[i].vector_id =
+ vport->recv_vectors->vchunks.vchunks->start_vector_id;
+ }
+ vport->qv_map = qv_map;
+
+ ret = idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, true);
+ if (ret != 0) {
+ DRV_LOG(ERR, "config interrupt mapping failed");
+ goto config_irq_map_err;
+ }
+
+ return 0;
+
+config_irq_map_err:
+ rte_free(vport->qv_map);
+ vport->qv_map = NULL;
+
+qv_map_alloc_err:
+ return ret;
+}
+
int
idpf_vport_irq_unmap_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
{
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 112367dae8..f767ea7cec 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -200,5 +200,9 @@ int idpf_vport_info_init(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_info);
__rte_internal
void idpf_vport_stats_update(struct virtchnl2_vport_stats *oes, struct virtchnl2_vport_stats *nes);
+__rte_internal
+int idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport,
+ uint32_t *qids,
+ uint16_t nb_rx_queues);
#endif /* _IDPF_COMMON_DEVICE_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 25624732b0..0729f6b912 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -69,6 +69,7 @@ INTERNAL {
idpf_vport_info_init;
idpf_vport_init;
idpf_vport_irq_map_config;
+ idpf_vport_irq_map_config_by_qids;
idpf_vport_irq_unmap_config;
idpf_vport_rss_config;
idpf_vport_stats_update;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v6 11/13] net/cpfl: enable write back based on ITR expire
2023-05-31 10:25 ` [PATCH v6 00/13] net/cpfl: add hairpin queue support beilei.xing
` (9 preceding siblings ...)
2023-05-31 10:25 ` [PATCH v6 10/13] common/idpf: add irq map config API beilei.xing
@ 2023-05-31 10:25 ` beilei.xing
2023-05-31 10:25 ` [PATCH v6 12/13] net/cpfl: support peer ports get beilei.xing
` (2 subsequent siblings)
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:25 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch enables write back based on ITR expire
(WB_ON_ITR) for hairpin queues.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 2b99e58341..850f1c0bc6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -735,11 +735,22 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
static int
cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
{
+ uint32_t qids[CPFL_MAX_P2P_NB_QUEUES + IDPF_DEFAULT_RXQ_NUM] = {0};
struct cpfl_vport *cpfl_vport = dev->data->dev_private;
struct idpf_vport *vport = &cpfl_vport->base;
uint16_t nb_rx_queues = dev->data->nb_rx_queues;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i;
- return idpf_vport_irq_map_config(vport, nb_rx_queues);
+ for (i = 0; i < nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ qids[i] = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid,
+ (i - cpfl_vport->nb_data_rxq));
+ else
+ qids[i] = cpfl_hw_qid_get(vport->chunks_info.rx_start_qid, i);
+ }
+ return idpf_vport_irq_map_config_by_qids(vport, qids, nb_rx_queues);
}
/* Update hairpin_info for dev's tx hairpin queue */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v6 12/13] net/cpfl: support peer ports get
2023-05-31 10:25 ` [PATCH v6 00/13] net/cpfl: add hairpin queue support beilei.xing
` (10 preceding siblings ...)
2023-05-31 10:25 ` [PATCH v6 11/13] net/cpfl: enable write back based on ITR expire beilei.xing
@ 2023-05-31 10:25 ` beilei.xing
2023-05-31 10:25 ` [PATCH v6 13/13] net/cpfl: support hairpin bind/unbind beilei.xing
2023-05-31 13:04 ` [PATCH v7 00/14] net/cpfl: add hairpin queue support beilei.xing
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:25 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports getting hairpin peer ports.
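For context, this op is reached through the generic ethdev API. A minimal,
hypothetical usage sketch (port_id is a placeholder, error handling trimmed):

#include <stdio.h>
#include <rte_ethdev.h>

static void
show_hairpin_peers(uint16_t port_id)
{
        uint16_t peers[RTE_MAX_ETHPORTS];
        int i, n;

        /* direction 1: peer Rx ports of this port's hairpin Tx queues;
         * direction 0 would return the peer Tx ports of its Rx queues.
         */
        n = rte_eth_hairpin_get_peer_ports(port_id, peers, RTE_DIM(peers), 1);
        for (i = 0; i < n; i++)
                printf("hairpin Tx queue %d peers with port %u\n", i, peers[i]);
}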
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 40 ++++++++++++++++++++++++++++++++++
1 file changed, 40 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 850f1c0bc6..9fc7d3401f 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1080,6 +1080,45 @@ cpfl_dev_close(struct rte_eth_dev *dev)
return 0;
}
+static int
+cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
+ size_t len, uint32_t tx)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_tx_queue *txq;
+ struct idpf_rx_queue *rxq;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i, j;
+
+ if (len <= 0)
+ return -EINVAL;
+
+ if (cpfl_vport->p2p_q_chunks_info == NULL)
+ return -ENOTSUP;
+
+ if (tx > 0) {
+ for (i = cpfl_vport->nb_data_txq, j = 0; i < dev->data->nb_tx_queues; i++, j++) {
+ txq = dev->data->tx_queues[i];
+ if (txq == NULL)
+ return -EINVAL;
+ cpfl_txq = (struct cpfl_tx_queue *)txq;
+ peer_ports[j] = cpfl_txq->hairpin_info.peer_rxp;
+ }
+ } else if (tx == 0) {
+ for (i = cpfl_vport->nb_data_rxq, j = 0; i < dev->data->nb_rx_queues; i++, j++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq == NULL)
+ return -EINVAL;
+ cpfl_rxq = (struct cpfl_rx_queue *)rxq;
+ peer_ports[j] = cpfl_rxq->hairpin_info.peer_txp;
+ }
+ }
+
+ return j;
+}
+
static const struct eth_dev_ops cpfl_eth_dev_ops = {
.dev_configure = cpfl_dev_configure,
.dev_close = cpfl_dev_close,
@@ -1109,6 +1148,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.hairpin_cap_get = cpfl_hairpin_cap_get,
.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
+ .hairpin_get_peer_ports = cpfl_hairpin_get_peer_ports,
};
static int
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v6 13/13] net/cpfl: support hairpin bind/unbind
2023-05-31 10:25 ` [PATCH v6 00/13] net/cpfl: add hairpin queue support beilei.xing
` (11 preceding siblings ...)
2023-05-31 10:25 ` [PATCH v6 12/13] net/cpfl: support peer ports get beilei.xing
@ 2023-05-31 10:25 ` beilei.xing
2023-05-31 13:04 ` [PATCH v7 00/14] net/cpfl: add hairpin queue support beilei.xing
13 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 10:25 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports hairpin_bind/unbind ops.
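These ops back rte_eth_hairpin_bind()/rte_eth_hairpin_unbind(), which an
application calls after both ports are started when the hairpin queues were set
up with manual_bind = 1. A minimal, hypothetical sketch (tx_port/rx_port are
placeholders):

#include <rte_ethdev.h>

static int
bind_hairpin(uint16_t tx_port, uint16_t rx_port)
{
        /* bind the Tx side of tx_port to the Rx side of rx_port */
        int ret = rte_eth_hairpin_bind(tx_port, rx_port);

        if (ret != 0)
                return ret;

        /* ... run traffic through the hairpin path ... */

        /* unbind before stopping either port */
        return rte_eth_hairpin_unbind(tx_port, rx_port);
}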
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 137 +++++++++++++++++++++++++++++++++
1 file changed, 137 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 9fc7d3401f..ff36f02b11 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1119,6 +1119,141 @@ cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
return j;
}
+static int
+cpfl_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct idpf_vport *tx_vport = &cpfl_tx_vport->base;
+ struct cpfl_vport *cpfl_rx_vport;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct rte_eth_dev *peer_dev;
+ struct idpf_vport *rx_vport;
+ int err = 0;
+ int i;
+
+ err = cpfl_txq_hairpin_info_update(dev, rx_port);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info.");
+ return err;
+ }
+
+ /* configure hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ err = cpfl_hairpin_txq_config(tx_vport, cpfl_txq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+ return err;
+ }
+ }
+
+ err = cpfl_hairpin_tx_complq_config(cpfl_tx_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
+ return err;
+ }
+
+ peer_dev = &rte_eth_devices[rx_port];
+ cpfl_rx_vport = (struct cpfl_vport *)peer_dev->data->dev_private;
+ rx_vport = &cpfl_rx_vport->base;
+ cpfl_rxq_hairpin_mz_bind(peer_dev);
+
+ err = cpfl_hairpin_rx_bufq_config(cpfl_rx_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
+ return err;
+ }
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ err = cpfl_hairpin_rxq_config(rx_vport, cpfl_rxq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
+ return err;
+ }
+ err = cpfl_rx_queue_init(peer_dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+ return err;
+ }
+ }
+
+ /* enable hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+ i - cpfl_tx_vport->nb_data_txq,
+ false, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on",
+ i);
+ return err;
+ }
+ cpfl_txq->base.q_started = true;
+ }
+
+ err = cpfl_switch_hairpin_complq(cpfl_tx_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq");
+ return err;
+ }
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+ i - cpfl_rx_vport->nb_data_rxq,
+ true, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on",
+ i);
+ }
+ cpfl_rxq->base.q_started = true;
+ }
+
+ err = cpfl_switch_hairpin_bufq(cpfl_rx_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Rx buffer queue");
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+cpfl_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+ struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i;
+
+ /* disable hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+ i - cpfl_tx_vport->nb_data_txq,
+ false, false);
+ cpfl_txq->base.q_started = false;
+ }
+
+ cpfl_switch_hairpin_complq(cpfl_tx_vport, false);
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+ i - cpfl_rx_vport->nb_data_rxq,
+ true, false);
+ cpfl_rxq->base.q_started = false;
+ }
+
+ cpfl_switch_hairpin_bufq(cpfl_rx_vport, false);
+
+ return 0;
+}
+
static const struct eth_dev_ops cpfl_eth_dev_ops = {
.dev_configure = cpfl_dev_configure,
.dev_close = cpfl_dev_close,
@@ -1149,6 +1284,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
.hairpin_get_peer_ports = cpfl_hairpin_get_peer_ports,
+ .hairpin_bind = cpfl_hairpin_bind,
+ .hairpin_unbind = cpfl_hairpin_unbind,
};
static int
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH v4 05/13] net/cpfl: support hairpin queue setup and release
2023-05-30 2:49 ` Liu, Mingxia
@ 2023-05-31 10:53 ` Xing, Beilei
0 siblings, 0 replies; 164+ messages in thread
From: Xing, Beilei @ 2023-05-31 10:53 UTC (permalink / raw)
To: Liu, Mingxia, Wu, Jingjing; +Cc: dev, Wang, Xiao W
> -----Original Message-----
> From: Liu, Mingxia <mingxia.liu@intel.com>
> Sent: Tuesday, May 30, 2023 10:50 AM
> To: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Wang, Xiao W <xiao.w.wang@intel.com>
> Subject: RE: [PATCH v4 05/13] net/cpfl: support hairpin queue setup and
> release
>
>
>
> > -----Original Message-----
> > From: Liu, Mingxia
> > Sent: Tuesday, May 30, 2023 10:27 AM
> > To: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing
> > <jingjing.wu@intel.com>
> > Cc: dev@dpdk.org; Wang, Xiao W <xiao.w.wang@intel.com>
> > Subject: RE: [PATCH v4 05/13] net/cpfl: support hairpin queue setup
> > and release
> >
> >
> >
> > > -----Original Message-----
> > > From: Xing, Beilei <beilei.xing@intel.com>
> > > Sent: Friday, May 26, 2023 3:39 PM
> > > To: Wu, Jingjing <jingjing.wu@intel.com>
> > > Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> > > <beilei.xing@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>
> > > Subject: [PATCH v4 05/13] net/cpfl: support hairpin queue setup and
> > > release
> > >
> > > From: Beilei Xing <beilei.xing@intel.com>
> > >
> > > Support hairpin Rx/Tx queue setup and release.
> > >
> > > Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> > > Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
> > > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > > ---
> > > drivers/net/cpfl/cpfl_ethdev.c | 6 +
> > > drivers/net/cpfl/cpfl_ethdev.h | 11 +
> > > drivers/net/cpfl/cpfl_rxtx.c | 353 +++++++++++++++++++++++-
> > > drivers/net/cpfl/cpfl_rxtx.h | 36 +++
> > > drivers/net/cpfl/cpfl_rxtx_vec_common.h | 4 +
> > > 5 files changed, 409 insertions(+), 1 deletion(-)
> > >
> > > diff --git a/drivers/net/cpfl/cpfl_ethdev.c
> > > b/drivers/net/cpfl/cpfl_ethdev.c index
> > > 40b4515539..b17c538ec2 100644
> > > --- a/drivers/net/cpfl/cpfl_ethdev.c
> > > +++ b/drivers/net/cpfl/cpfl_ethdev.c
> > > @@ -879,6 +879,10 @@ cpfl_dev_close(struct rte_eth_dev *dev)
> > > struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport-
> > > >adapter);
> > >
> > > cpfl_dev_stop(dev);
> > > + if (cpfl_vport->p2p_mp) {
> > > + rte_mempool_free(cpfl_vport->p2p_mp);
> > > + cpfl_vport->p2p_mp = NULL;
> > > + }
> > >
> > > if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
> > > cpfl_p2p_queue_grps_del(vport);
> > > @@ -922,6 +926,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops =
> {
> > > .xstats_get_names = cpfl_dev_xstats_get_names,
> > > .xstats_reset = cpfl_dev_xstats_reset,
> > > .hairpin_cap_get = cpfl_hairpin_cap_get,
> > > + .rx_hairpin_queue_setup =
> cpfl_rx_hairpin_queue_setup,
> > > + .tx_hairpin_queue_setup =
> cpfl_tx_hairpin_queue_setup,
> > > };
> > >
> > > +int
> > > +cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> > > + uint16_t nb_desc,
> > > + const struct rte_eth_hairpin_conf *conf) {
> > > + struct cpfl_vport *cpfl_vport = (struct cpfl_vport *)dev->data-
> > > >dev_private;
> > > + struct idpf_vport *vport = &cpfl_vport->base;
> > > + struct idpf_adapter *adapter_base = vport->adapter;
> > > + uint16_t logic_qid = cpfl_vport->nb_p2p_rxq;
> > > + struct cpfl_rxq_hairpin_info *hairpin_info;
> > > + struct cpfl_rx_queue *cpfl_rxq;
> > > + struct idpf_rx_queue *bufq1 = NULL;
> > > + struct idpf_rx_queue *rxq;
> > > + uint16_t peer_port, peer_q;
> > > + uint16_t qid;
> > > + int ret;
> > > +
> > > + if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
> > > + PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin
> > > queue.");
> > > + return -EINVAL;
> > > + }
> > > +
> > > + if (conf->peer_count != 1) {
> > > + PMD_INIT_LOG(ERR, "Can't support Rx hairpin queue peer
> > > count %d", conf->peer_count);
> > > + return -EINVAL;
> > > + }
> > > +
> > > + peer_port = conf->peers[0].port;
> > > + peer_q = conf->peers[0].queue;
> > > +
> > > + if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
> > > + nb_desc > CPFL_MAX_RING_DESC ||
> > > + nb_desc < CPFL_MIN_RING_DESC) {
> > > + PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is
> > > invalid", nb_desc);
> > > + return -EINVAL;
> > > + }
> > > +
> > > + /* Free memory if needed */
> > > + if (dev->data->rx_queues[queue_idx]) {
> > > + cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
> > > + dev->data->rx_queues[queue_idx] = NULL;
> > > + }
> > > +
> > > + /* Setup Rx description queue */
> > > + cpfl_rxq = rte_zmalloc_socket("cpfl hairpin rxq",
> > > + sizeof(struct cpfl_rx_queue),
> > > + RTE_CACHE_LINE_SIZE,
> > > + SOCKET_ID_ANY);
> > > + if (!cpfl_rxq) {
> > > + PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue
> > > data structure");
> > > + return -ENOMEM;
> > > + }
> > > +
> > > + rxq = &cpfl_rxq->base;
> > > + hairpin_info = &cpfl_rxq->hairpin_info;
> > > + rxq->nb_rx_desc = nb_desc * 2;
> > > + rxq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info-
> > > >rx_start_qid, logic_qid);
> > > + rxq->port_id = dev->data->port_id;
> > > + rxq->adapter = adapter_base;
> > > + rxq->rx_buf_len = CPFL_P2P_MBUF_SIZE -
> RTE_PKTMBUF_HEADROOM;
> > > + hairpin_info->hairpin_q = true;
> > > + hairpin_info->peer_txp = peer_port;
> > > + hairpin_info->peer_txq_id = peer_q;
> > > +
> > > + if (conf->manual_bind != 0)
> > > + cpfl_vport->p2p_manual_bind = true;
> > > + else
> > > + cpfl_vport->p2p_manual_bind = false;
> > > +
> > > + if (cpfl_vport->p2p_rx_bufq == NULL) {
> > > + bufq1 = rte_zmalloc_socket("hairpin rx bufq1",
> > > + sizeof(struct idpf_rx_queue),
> > > + RTE_CACHE_LINE_SIZE,
> > > + SOCKET_ID_ANY);
> > > + if (!bufq1) {
> > > + PMD_INIT_LOG(ERR, "Failed to allocate memory for
> > > hairpin Rx buffer queue 1.");
> > > + ret = -ENOMEM;
> > > + goto err_alloc_bufq1;
> > > + }
> > > + qid = 2 * logic_qid;
> > > + ret = cpfl_rx_hairpin_bufq_setup(dev, bufq1, qid, nb_desc);
> > > + if (ret) {
> > > + PMD_INIT_LOG(ERR, "Failed to setup hairpin Rx buffer
> > > queue 1");
> > > + ret = -EINVAL;
> > > + goto err_setup_bufq1;
> > > + }
> > > + cpfl_vport->p2p_rx_bufq = bufq1;
> > > + }
> > > +
> > > + rxq->bufq1 = cpfl_vport->p2p_rx_bufq;
> > > + rxq->bufq2 = NULL;
> > > +
> > > + cpfl_vport->nb_p2p_rxq++;
> > > + rxq->q_set = true;
> > > + dev->data->rx_queues[queue_idx] = cpfl_rxq;
> > > +
> > > + return 0;
> > > +
> > > +err_setup_bufq1:
> > > + rte_free(bufq1);
> > > +err_alloc_bufq1:
> > > + rte_free(rxq);
> > [Liu, Mingxia] Here should free cpfl_rxq, right?
Thanks for the comments, all the error handling is refined in the new version.
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH v4 09/13] net/cpfl: support hairpin queue start/stop
2023-05-30 3:30 ` Liu, Mingxia
@ 2023-05-31 10:53 ` Xing, Beilei
0 siblings, 0 replies; 164+ messages in thread
From: Xing, Beilei @ 2023-05-31 10:53 UTC (permalink / raw)
To: Liu, Mingxia, Wu, Jingjing; +Cc: dev, Wang, Xiao W
> -----Original Message-----
> From: Liu, Mingxia <mingxia.liu@intel.com>
> Sent: Tuesday, May 30, 2023 11:31 AM
> To: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Wang, Xiao W <xiao.w.wang@intel.com>
> Subject: RE: [PATCH v4 09/13] net/cpfl: support hairpin queue start/stop
>
>
>
> > -----Original Message-----
> > From: Xing, Beilei <beilei.xing@intel.com>
> > Sent: Friday, May 26, 2023 3:39 PM
> > To: Wu, Jingjing <jingjing.wu@intel.com>
> > Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> > <beilei.xing@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>
> > Subject: [PATCH v4 09/13] net/cpfl: support hairpin queue start/stop
> >
> > From: Beilei Xing <beilei.xing@intel.com>
> >
> > This patch supports Rx/Tx hairpin queue start/stop.
> >
> > Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> > Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > ---
> > drivers/net/cpfl/cpfl_ethdev.c | 41 +++++++++
> > drivers/net/cpfl/cpfl_rxtx.c | 151 +++++++++++++++++++++++++++++----
> > drivers/net/cpfl/cpfl_rxtx.h | 14 +++
> > 3 files changed, 188 insertions(+), 18 deletions(-)
> >
> > diff --git a/drivers/net/cpfl/cpfl_ethdev.c
> > b/drivers/net/cpfl/cpfl_ethdev.c index
> > a06def06d0..8035878602 100644
> > --- a/drivers/net/cpfl/cpfl_ethdev.c
> > +++ b/drivers/net/cpfl/cpfl_ethdev.c
> > @@ -896,6 +896,47 @@ cpfl_start_queues(struct rte_eth_dev *dev)
> > }
> > }
> >
> > + /* For non-manual bind hairpin queues, enable Tx queue and Rx queue,
> > + * then enable Tx completion queue and Rx buffer queue.
> > + */
> > + for (i = 0; i < dev->data->nb_tx_queues; i++) {
> [Liu, Mingxia] Better to use for (i = cpfl_tx_vport->nb_data_txq; i < dev->data-
> >nb_tx_queues; i++), because when i < cpfl_tx_vport->nb_data_txq, (cpfl_txq-
> >hairpin_info.hairpin_q && !cpfl_vport-
> > >p2p_manual_bind) must be false, or (i - cpfl_vport->nb_data_txq) will < 0.
>
> > + cpfl_txq = dev->data->tx_queues[i];
> > + if (cpfl_txq->hairpin_info.hairpin_q && !cpfl_vport-
> > >p2p_manual_bind) {
> > + err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
> > + i - cpfl_vport-
> > >nb_data_txq,
> > + false, true);
> > + if (err)
> > + PMD_DRV_LOG(ERR, "Failed to switch hairpin
> > TX queue %u on",
> > + i);
> > + else
> > + cpfl_txq->base.q_started = true;
> > + }
> > + }
> > +
> > + for (i = 0; i < dev->data->nb_rx_queues; i++) {
> [Liu, Mingxia] Better to use for (i = cpfl_rx_vport->nb_data_rxq; i < dev->data-
> >nb_rx_queues; i++), because when i < cpfl_rx_vport->nb_data_rxq, (cpfl_txq-
> >hairpin_info.hairpin_q && !cpfl_vport-
> > >p2p_manual_bind) must be false, or (i - cpfl_vport->nb_data_rxq) will < 0.
Makes sense.
>
> > + cpfl_rxq = dev->data->rx_queues[i];
> > + if (cpfl_rxq->hairpin_info.hairpin_q && !cpfl_vport-
> > >p2p_manual_bind) {
> > + err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
> > + i - cpfl_vport-
> > >nb_data_rxq,
> > + true, true);
> > + if (err)
> > + PMD_DRV_LOG(ERR, "Failed to switch hairpin
> > RX queue %u on",
> > + i);
> > + else
> > + cpfl_rxq->base.q_started = true;
> > + }
> > + }
> > +
> > + if (!cpfl_vport->p2p_manual_bind &&
> > + cpfl_vport->p2p_tx_complq != NULL &&
> > + cpfl_vport->p2p_rx_bufq != NULL) {
> > + err = cpfl_switch_hairpin_bufq_complq(cpfl_vport, true);
> > + if (err != 0) {
> > + PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx
> > complq and Rx bufq");
> > + return err;
> > + }
> > + }
> > +
> > return err;
> > }
> >
> > diff --git a/drivers/net/cpfl/cpfl_rxtx.c
> > b/drivers/net/cpfl/cpfl_rxtx.c index
> > 702054d1c5..38c48ad8c7 100644
> > --- a/drivers/net/cpfl/cpfl_rxtx.c
> > +++ b/drivers/net/cpfl/cpfl_rxtx.c
> > @@ -991,6 +991,81 @@ cpfl_hairpin_txq_config(struct idpf_vport *vport,
> > struct cpfl_tx_queue *cpfl_txq
> > return idpf_vc_txq_config_by_info(vport, txq_info, 1); }
> >
> > +int
> > +cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool
> > +on) {
> > + struct idpf_vport *vport = &cpfl_vport->base;
> > + uint32_t type;
> > + int err, queue_id;
> > +
> > + type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
> > + queue_id = cpfl_vport->p2p_tx_complq->queue_id;
> > + err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
> > + if (err)
> > + return err;
> > +
> > + type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
> > + queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
> > + err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
> > +
> > + return err;
> > +}
> > +
> > +int
> > +cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport,
> > +uint16_t
> > logic_qid,
> > + bool rx, bool on)
> > +{
> > + struct idpf_vport *vport = &cpfl_vport->base;
> > + uint32_t type;
> > + int err, queue_id;
> > +
> > + type = rx ? VIRTCHNL2_QUEUE_TYPE_RX :
> > VIRTCHNL2_QUEUE_TYPE_TX;
> > +
> > + if (type == VIRTCHNL2_QUEUE_TYPE_RX)
> > + queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info-
> > >rx_start_qid, logic_qid);
> > + else
> > + queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info-
> > >tx_start_qid, logic_qid);
> > + err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
> > + if (err)
> > + return err;
> > +
> > + return err;
> > +}
> > +
> > +static int
> > +cpfl_alloc_split_p2p_rxq_mbufs(struct idpf_rx_queue *rxq) {
> > + volatile struct virtchnl2_p2p_rx_buf_desc *rxd;
> > + struct rte_mbuf *mbuf = NULL;
> > + uint64_t dma_addr;
> > + uint16_t i;
> > +
> > + for (i = 0; i < rxq->nb_rx_desc; i++) {
> > + mbuf = rte_mbuf_raw_alloc(rxq->mp);
> > + if (unlikely(!mbuf)) {
> > + PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
> > + return -ENOMEM;
> > + }
> > +
> > + rte_mbuf_refcnt_set(mbuf, 1);
> > + mbuf->next = NULL;
> > + mbuf->data_off = RTE_PKTMBUF_HEADROOM;
> > + mbuf->nb_segs = 1;
> > + mbuf->port = rxq->port_id;
> > + dma_addr =
> > rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
> > +
> > + rxd = &((volatile struct virtchnl2_p2p_rx_buf_desc *)(rxq-
> > >rx_ring))[i];
> > + rxd->reserve0 = 0;
> > + rxd->pkt_addr = dma_addr;
> > + }
> > +
> > + rxq->nb_rx_hold = 0;
> > + /* The value written in the RX buffer queue tail register, must be a
> > multiple of 8.*/
> > + rxq->rx_tail = rxq->nb_rx_desc - CPFL_HAIRPIN_Q_TAIL_AUX_VALUE;
> > +
> > + return 0;
> > +}
> > +
> > int
> > cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id) {
> > @@ -
> > 1044,22 +1119,31 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev,
> > uint16_t
> > rx_queue_id)
> > IDPF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
> > } else {
> > /* Split queue */
> > - err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
> > - if (err != 0) {
> > - PMD_DRV_LOG(ERR, "Failed to allocate RX buffer
> > queue mbuf");
> > - return err;
> > - }
> > - err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
> > - if (err != 0) {
> > - PMD_DRV_LOG(ERR, "Failed to allocate RX buffer
> > queue mbuf");
> > - return err;
> > + if (cpfl_rxq->hairpin_info.hairpin_q) {
> > + err = cpfl_alloc_split_p2p_rxq_mbufs(rxq->bufq1);
> > + if (err != 0) {
> > + PMD_DRV_LOG(ERR, "Failed to allocate p2p
> RX
> > buffer queue mbuf");
> > + return err;
> > + }
> > + } else {
> > + err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
> > + if (err != 0) {
> > + PMD_DRV_LOG(ERR, "Failed to allocate RX
> > buffer queue mbuf");
> > + return err;
> > + }
> > + err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
> > + if (err != 0) {
> > + PMD_DRV_LOG(ERR, "Failed to allocate RX
> > buffer queue mbuf");
> > + return err;
> > + }
> > }
> >
> > rte_wmb();
> >
> > /* Init the RX tail register. */
> > IDPF_PCI_REG_WRITE(rxq->bufq1->qrx_tail, rxq->bufq1-
> > >rx_tail);
> > - IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2-
> > >rx_tail);
> > + if (rxq->bufq2)
> > + IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq-
> > >bufq2->rx_tail);
> > }
> >
> > return err;
> > @@ -1166,7 +1250,12 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev,
> > uint16_t rx_queue_id)
> > return -EINVAL;
> >
> > cpfl_rxq = dev->data->rx_queues[rx_queue_id];
> > - err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
> > + if (cpfl_rxq->hairpin_info.hairpin_q)
> > + err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
> > + rx_queue_id - cpfl_vport-
> > >nb_data_txq,
> > + true, false);
> > + else
> > + err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
> > if (err != 0) {
> > PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
> > rx_queue_id);
> > @@ -1180,10 +1269,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev,
> > uint16_t rx_queue_id)
> > idpf_qc_single_rx_queue_reset(rxq);
> > } else {
> > rxq->bufq1->ops->release_mbufs(rxq->bufq1);
> > - rxq->bufq2->ops->release_mbufs(rxq->bufq2);
> > - idpf_qc_split_rx_queue_reset(rxq);
> > + if (rxq->bufq2)
> > + rxq->bufq2->ops->release_mbufs(rxq->bufq2);
> > + if (cpfl_rxq->hairpin_info.hairpin_q) {
> > + cpfl_rx_hairpin_descq_reset(rxq);
> > + cpfl_rx_hairpin_bufq_reset(rxq->bufq1);
> > + } else {
> > + idpf_qc_split_rx_queue_reset(rxq);
> > + }
> > }
> > - dev->data->rx_queue_state[rx_queue_id] =
> > RTE_ETH_QUEUE_STATE_STOPPED;
> > + if (!cpfl_rxq->hairpin_info.hairpin_q)
> > + dev->data->rx_queue_state[rx_queue_id] =
> > RTE_ETH_QUEUE_STATE_STOPPED;
> >
> > return 0;
> > }
> > @@ -1202,7 +1298,12 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev,
> > uint16_t tx_queue_id)
> >
> > cpfl_txq = dev->data->tx_queues[tx_queue_id];
> >
> > - err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
> > + if (cpfl_txq->hairpin_info.hairpin_q)
> > + err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
> > + tx_queue_id - cpfl_vport-
> > >nb_data_txq,
> > + false, false);
> > + else
> > + err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
> > if (err != 0) {
> > PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
> > tx_queue_id);
> > @@ -1215,10 +1316,17 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev,
> > uint16_t tx_queue_id)
> > if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
> > idpf_qc_single_tx_queue_reset(txq);
> > } else {
> > - idpf_qc_split_tx_descq_reset(txq);
> > - idpf_qc_split_tx_complq_reset(txq->complq);
> > + if (cpfl_txq->hairpin_info.hairpin_q) {
> > + cpfl_tx_hairpin_descq_reset(txq);
> > + cpfl_tx_hairpin_complq_reset(txq->complq);
> > + } else {
> > + idpf_qc_split_tx_descq_reset(txq);
> > + idpf_qc_split_tx_complq_reset(txq->complq);
> > + }
> > }
> > - dev->data->tx_queue_state[tx_queue_id] =
> > RTE_ETH_QUEUE_STATE_STOPPED;
> > +
> > + if (!cpfl_txq->hairpin_info.hairpin_q)
> > + dev->data->tx_queue_state[tx_queue_id] =
> > RTE_ETH_QUEUE_STATE_STOPPED;
> >
> > return 0;
> > }
> > @@ -1238,10 +1346,17 @@ cpfl_dev_tx_queue_release(struct rte_eth_dev
> > *dev, uint16_t qid) void cpfl_stop_queues(struct rte_eth_dev *dev)
> > {
> > + struct cpfl_vport *cpfl_vport =
> > + (struct cpfl_vport *)dev->data->dev_private;
> > struct cpfl_rx_queue *cpfl_rxq;
> > struct cpfl_tx_queue *cpfl_txq;
> > int i;
> >
> > + if (cpfl_vport->p2p_rx_bufq != NULL) {
> > + if (cpfl_switch_hairpin_bufq_complq(cpfl_vport, false) != 0)
> > + PMD_DRV_LOG(ERR, "Failed to stop hairpin Tx complq
> > and Rx bufq");
> > + }
> > +
> > for (i = 0; i < dev->data->nb_rx_queues; i++) {
> > cpfl_rxq = dev->data->rx_queues[i];
> > if (cpfl_rxq == NULL)
> > diff --git a/drivers/net/cpfl/cpfl_rxtx.h
> > b/drivers/net/cpfl/cpfl_rxtx.h index
> > 872ebc1bfd..42dfd07155 100644
> > --- a/drivers/net/cpfl/cpfl_rxtx.h
> > +++ b/drivers/net/cpfl/cpfl_rxtx.h
> > @@ -41,6 +41,17 @@
> >
> > #define CPFL_RX_BUF_STRIDE 64
> >
> > +/* The value written in the RX buffer queue tail register,
> > + * and in WritePTR field in the TX completion queue context,
> > + * must be a multiple of 8.
> > + */
> > +#define CPFL_HAIRPIN_Q_TAIL_AUX_VALUE 8
> > +
> > +struct virtchnl2_p2p_rx_buf_desc {
> > + __le64 reserve0;
> > + __le64 pkt_addr; /* Packet buffer address */ };
> > +
> > struct cpfl_rxq_hairpin_info {
> > bool hairpin_q; /* if rx queue is a hairpin queue */
> > uint16_t peer_txp;
> > @@ -102,4 +113,7 @@ int cpfl_hairpin_tx_complq_config(struct
> > cpfl_vport *cpfl_vport); int cpfl_hairpin_txq_config(struct
> > idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq); int
> > cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport); int
> > cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue
> > *cpfl_rxq);
> > +int cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport,
> > +bool on); int cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport
> > +*cpfl_vport, uint16_t
> > qid,
> > + bool rx, bool on);
> > #endif /* _CPFL_RXTX_H_ */
> > --
> > 2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH v4 13/13] net/cpfl: support hairpin bind/unbind
2023-05-30 3:59 ` Liu, Mingxia
@ 2023-05-31 10:54 ` Xing, Beilei
0 siblings, 0 replies; 164+ messages in thread
From: Xing, Beilei @ 2023-05-31 10:54 UTC (permalink / raw)
To: Liu, Mingxia, Wu, Jingjing; +Cc: dev, Wang, Xiao W
> -----Original Message-----
> From: Liu, Mingxia <mingxia.liu@intel.com>
> Sent: Tuesday, May 30, 2023 12:00 PM
> To: Xing, Beilei <beilei.xing@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Wang, Xiao W <xiao.w.wang@intel.com>
> Subject: RE: [PATCH v4 13/13] net/cpfl: support hairpin bind/unbind
>
>
>
> > -----Original Message-----
> > From: Xing, Beilei <beilei.xing@intel.com>
> > Sent: Friday, May 26, 2023 3:39 PM
> > To: Wu, Jingjing <jingjing.wu@intel.com>
> > Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> > <beilei.xing@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>
> > Subject: [PATCH v4 13/13] net/cpfl: support hairpin bind/unbind
> >
> > From: Beilei Xing <beilei.xing@intel.com>
> >
> > This patch supports hairpin_bind/unbind ops.
> >
> > Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > ---
> > drivers/net/cpfl/cpfl_ethdev.c | 137 +++++++++++++++++++++++++++++++++
> > drivers/net/cpfl/cpfl_rxtx.c | 28 +++++++
> > drivers/net/cpfl/cpfl_rxtx.h | 2 +
> > 3 files changed, 167 insertions(+)
> >
> > diff --git a/drivers/net/cpfl/cpfl_ethdev.c
> > b/drivers/net/cpfl/cpfl_ethdev.c index
> > d6dc1672f1..4b70441e27 100644
> > --- a/drivers/net/cpfl/cpfl_ethdev.c
> > +++ b/drivers/net/cpfl/cpfl_ethdev.c
> > @@ -1114,6 +1114,141 @@ cpfl_hairpin_get_peer_ports(struct
> rte_eth_dev
> > *dev, uint16_t *peer_ports,
> > return j;
> > }
> >
> >
> > static int
> > diff --git a/drivers/net/cpfl/cpfl_rxtx.c
> > b/drivers/net/cpfl/cpfl_rxtx.c index 38c48ad8c7..ef83a03c2b 100644
> > --- a/drivers/net/cpfl/cpfl_rxtx.c
> > +++ b/drivers/net/cpfl/cpfl_rxtx.c
> > @@ -1011,6 +1011,34 @@ cpfl_switch_hairpin_bufq_complq(struct
> > cpfl_vport *cpfl_vport, bool on)
> > return err;
> > }
> >
> > +int
> > +cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on) {
> > + struct idpf_vport *vport = &cpfl_vport->base;
> > + uint32_t type;
> > + int err, queue_id;
> > +
> > + type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
> > + queue_id = cpfl_vport->p2p_tx_complq->queue_id;
> > + err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
> > +
> > + return err;
> > +}
> > +
> > +int
> > +cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on) {
> > + struct idpf_vport *vport = &cpfl_vport->base;
> > + uint32_t type;
> > + int err, queue_id;
> > +
> > + type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
> > + queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
> > + err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
> > +
> > + return err;
> > +}
> > +
> [Liu, Mingxia] Can cpfl_switch_hairpin_bufq_complq() in patch 9/13 be
> optimized by calling cpfl_switch_hairpin_complq() and
> cpfl_switch_hairpin_bufq()?
Yes, the functions are duplicated. Refined in next version.
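For reference, a sketch of that refactor, assuming the helper signatures quoted
in this thread, could be:

int
cpfl_switch_hairpin_bufq_complq(struct cpfl_vport *cpfl_vport, bool on)
{
        int err;

        /* reuse the single-queue helpers instead of open-coding both switches */
        err = cpfl_switch_hairpin_complq(cpfl_vport, on);
        if (err != 0)
                return err;

        return cpfl_switch_hairpin_bufq(cpfl_vport, on);
}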
> > int
> > cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport,
> > uint16_t logic_qid,
> > bool rx, bool on)
> > diff --git a/drivers/net/cpfl/cpfl_rxtx.h
> > b/drivers/net/cpfl/cpfl_rxtx.h index
> > 42dfd07155..86e97541c4 100644
> > --- a/drivers/net/cpfl/cpfl_rxtx.h
> > +++ b/drivers/net/cpfl/cpfl_rxtx.h
> > @@ -114,6 +114,8 @@ int cpfl_hairpin_txq_config(struct idpf_vport
> > *vport, struct cpfl_tx_queue *cpfl int
> > cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport); int
> > cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue
> > *cpfl_rxq); int cpfl_switch_hairpin_bufq_complq(struct
> > cpfl_vport *cpfl_vport, bool on);
> > +int cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool
> > +on); int cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool
> > +on);
> > int cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t
> qid,
> > bool rx, bool on);
> > #endif /* _CPFL_RXTX_H_ */
> > --
> > 2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v7 00/14] net/cpfl: add hairpin queue support
2023-05-31 10:25 ` [PATCH v6 00/13] net/cpfl: add hairpin queue support beilei.xing
` (12 preceding siblings ...)
2023-05-31 10:25 ` [PATCH v6 13/13] net/cpfl: support hairpin bind/unbind beilei.xing
@ 2023-05-31 13:04 ` beilei.xing
2023-05-31 13:04 ` [PATCH v7 01/14] net/cpfl: refine structures beilei.xing
` (14 more replies)
13 siblings, 15 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 13:04 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patchset adds hairpin queue support.
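At the ethdev level the feature is consumed through the standard hairpin setup
calls. A minimal, hypothetical single-port sketch (queue index and descriptor
count are illustrative; the helper itself is not part of this series):

#include <rte_ethdev.h>

static int
setup_hairpin_pair(uint16_t port_id, uint16_t qidx, uint16_t nb_desc)
{
        struct rte_eth_hairpin_conf hp_conf = { .peer_count = 1 };
        int ret;

        /* pair Rx hairpin queue qidx with Tx hairpin queue qidx on the same port */
        hp_conf.peers[0].port = port_id;
        hp_conf.peers[0].queue = qidx;
        ret = rte_eth_rx_hairpin_queue_setup(port_id, qidx, nb_desc, &hp_conf);
        if (ret != 0)
                return ret;

        return rte_eth_tx_hairpin_queue_setup(port_id, qidx, nb_desc, &hp_conf);
}

In this series the hairpin queues are expected to use the queue indexes after the
data queues (see nb_data_rxq/nb_data_txq in the patches below).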
v2 changes:
- change hairpin rx queues configuration sequence.
- code refinement.
v3 changes:
- Refine the patchset based on the latest code.
v4 changes:
- Remove hairpin rx buffer queue's sw_ring.
- Change hairpin rx queues configuration sequence in cpfl_hairpin_bind function.
- Refine hairpin queue setup and release.
v5 changes:
- Fix memory leak during queue setup.
- Refine hairpin Rxq/Txq start/stop.
v6 change:
- Add sign-off.
v7 change:
- Update cpfl.rst
Beilei Xing (14):
net/cpfl: refine structures
common/idpf: support queue groups add/delete
net/cpfl: add hairpin queue group during vport init
net/cpfl: support hairpin queue capability get
net/cpfl: support hairpin queue setup and release
common/idpf: add queue config API
net/cpfl: support hairpin queue configuration
common/idpf: add switch queue API
net/cpfl: support hairpin queue start/stop
common/idpf: add irq map config API
net/cpfl: enable write back based on ITR expire
net/cpfl: support peer ports get
net/cpfl: support hairpin bind/unbind
doc: update the doc of CPFL PMD
doc/guides/nics/cpfl.rst | 7 +
drivers/common/idpf/idpf_common_device.c | 75 ++
drivers/common/idpf/idpf_common_device.h | 4 +
drivers/common/idpf/idpf_common_virtchnl.c | 138 +++-
drivers/common/idpf/idpf_common_virtchnl.h | 18 +
drivers/common/idpf/version.map | 6 +
drivers/net/cpfl/cpfl_ethdev.c | 610 ++++++++++++++--
drivers/net/cpfl/cpfl_ethdev.h | 35 +-
drivers/net/cpfl/cpfl_rxtx.c | 781 +++++++++++++++++++--
drivers/net/cpfl/cpfl_rxtx.h | 76 ++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 21 +-
11 files changed, 1652 insertions(+), 119 deletions(-)
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v7 01/14] net/cpfl: refine structures
2023-05-31 13:04 ` [PATCH v7 00/14] net/cpfl: add hairpin queue support beilei.xing
@ 2023-05-31 13:04 ` beilei.xing
2023-05-31 13:04 ` [PATCH v7 02/14] common/idpf: support queue groups add/delete beilei.xing
` (13 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 13:04 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch refines some structures to support hairpin queues:
cpfl_rx_queue, cpfl_tx_queue and cpfl_vport.
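The idea is to wrap the common idpf structures so that cpfl-specific state (added
by the later hairpin patches) can sit next to them; because the base is the first
member, existing code keeps working through a simple cast. A sketch using the
names from this patch:

struct cpfl_rx_queue *cpfl_rxq = dev->data->rx_queues[i];
struct idpf_rx_queue *rxq = &cpfl_rxq->base;    /* common idpf layer still operates on the base */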
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 85 +++++++-----
drivers/net/cpfl/cpfl_ethdev.h | 6 +-
drivers/net/cpfl/cpfl_rxtx.c | 175 +++++++++++++++++-------
drivers/net/cpfl/cpfl_rxtx.h | 8 ++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 17 +--
5 files changed, 196 insertions(+), 95 deletions(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 7528a14d05..e587155db6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -124,7 +124,8 @@ static int
cpfl_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct rte_eth_link new_link;
unsigned int i;
@@ -156,7 +157,8 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
static int
cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
dev_info->max_rx_queues = base->caps.max_rx_q;
@@ -216,7 +218,8 @@ cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
static int
cpfl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
/* mtu setting is forbidden if port is start */
if (dev->data->dev_started) {
@@ -256,12 +259,12 @@ static uint64_t
cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
{
uint64_t mbuf_alloc_failed = 0;
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i = 0;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- mbuf_alloc_failed += __atomic_load_n(&rxq->rx_stats.mbuf_alloc_failed,
+ cpfl_rxq = dev->data->rx_queues[i];
+ mbuf_alloc_failed += __atomic_load_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,
__ATOMIC_RELAXED);
}
@@ -271,8 +274,8 @@ cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
static int
cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
int ret;
@@ -305,20 +308,20 @@ cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
static void
cpfl_reset_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- __atomic_store_n(&rxq->rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
+ cpfl_rxq = dev->data->rx_queues[i];
+ __atomic_store_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
}
}
static int
cpfl_dev_stats_reset(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
int ret;
@@ -343,8 +346,8 @@ static int cpfl_dev_xstats_reset(struct rte_eth_dev *dev)
static int cpfl_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned int n)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
unsigned int i;
int ret;
@@ -459,7 +462,8 @@ cpfl_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t idx, shift;
int ret = 0;
@@ -498,7 +502,8 @@ cpfl_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t idx, shift;
int ret = 0;
@@ -536,7 +541,8 @@ static int
cpfl_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
int ret = 0;
@@ -601,7 +607,8 @@ static int
cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
int ret = 0;
@@ -638,7 +645,8 @@ cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,
static int
cpfl_dev_configure(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct rte_eth_conf *conf = &dev->data->dev_conf;
struct idpf_adapter *base = vport->adapter;
int ret;
@@ -710,7 +718,8 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
static int
cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
uint16_t nb_rx_queues = dev->data->nb_rx_queues;
return idpf_vport_irq_map_config(vport, nb_rx_queues);
@@ -719,14 +728,14 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
static int
cpfl_start_queues(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
- struct idpf_tx_queue *txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
int err = 0;
int i;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL || txq->tx_deferred_start)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
continue;
err = cpfl_tx_queue_start(dev, i);
if (err != 0) {
@@ -736,8 +745,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- if (rxq == NULL || rxq->rx_deferred_start)
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
continue;
err = cpfl_rx_queue_start(dev, i);
if (err != 0) {
@@ -752,7 +761,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)
static int
cpfl_dev_start(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(base);
uint16_t num_allocated_vectors = base->caps.num_allocated_vectors;
@@ -813,7 +823,8 @@ cpfl_dev_start(struct rte_eth_dev *dev)
static int
cpfl_dev_stop(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
if (dev->data->dev_started == 0)
return 0;
@@ -832,7 +843,8 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
static int
cpfl_dev_close(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
@@ -842,7 +854,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
adapter->cur_vport_nb--;
dev->data->dev_private = NULL;
adapter->vports[vport->sw_idx] = NULL;
- rte_free(vport);
+ rte_free(cpfl_vport);
return 0;
}
@@ -1047,7 +1059,7 @@ cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id)
int i;
for (i = 0; i < adapter->cur_vport_nb; i++) {
- vport = adapter->vports[i];
+ vport = &adapter->vports[i]->base;
if (vport->vport_id != vport_id)
continue;
else
@@ -1275,7 +1287,8 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter)
static int
cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_vport_param *param = init_params;
struct cpfl_adapter_ext *adapter = param->adapter;
/* for sending create vport virtchnl msg prepare */
@@ -1300,7 +1313,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
goto err;
}
- adapter->vports[param->idx] = vport;
+ adapter->vports[param->idx] = cpfl_vport;
adapter->cur_vports |= RTE_BIT32(param->devarg_id);
adapter->cur_vport_nb++;
@@ -1415,7 +1428,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
snprintf(name, sizeof(name), "cpfl_%s_vport_0",
pci_dev->device.name);
retval = rte_eth_dev_create(&pci_dev->device, name,
- sizeof(struct idpf_vport),
+ sizeof(struct cpfl_vport),
NULL, NULL, cpfl_dev_vport_init,
&vport_param);
if (retval != 0)
@@ -1433,7 +1446,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
pci_dev->device.name,
devargs.req_vports[i]);
retval = rte_eth_dev_create(&pci_dev->device, name,
- sizeof(struct idpf_vport),
+ sizeof(struct cpfl_vport),
NULL, NULL, cpfl_dev_vport_init,
&vport_param);
if (retval != 0)
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 200dfcac02..81fe9ac4c3 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -69,13 +69,17 @@ struct cpfl_devargs {
uint16_t req_vport_nb;
};
+struct cpfl_vport {
+ struct idpf_vport base;
+};
+
struct cpfl_adapter_ext {
TAILQ_ENTRY(cpfl_adapter_ext) next;
struct idpf_adapter base;
char name[CPFL_ADAPTER_NAME_LEN];
- struct idpf_vport **vports;
+ struct cpfl_vport **vports;
uint16_t max_vport_nb;
uint16_t cur_vports; /* bit mask of created vport */
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 75021c3c54..04a51b8d15 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -128,7 +128,8 @@ cpfl_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,
uint16_t nb_desc, unsigned int socket_id,
struct rte_mempool *mp, uint8_t bufq_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct idpf_hw *hw = &base->hw;
const struct rte_memzone *mz;
@@ -220,15 +221,69 @@ cpfl_rx_split_bufq_release(struct idpf_rx_queue *bufq)
rte_free(bufq);
}
+static void
+cpfl_rx_queue_release(void *rxq)
+{
+ struct cpfl_rx_queue *cpfl_rxq = rxq;
+ struct idpf_rx_queue *q = NULL;
+
+ if (cpfl_rxq == NULL)
+ return;
+
+ q = &cpfl_rxq->base;
+
+ /* Split queue */
+ if (!q->adapter->is_rx_singleq) {
+ if (q->bufq2)
+ cpfl_rx_split_bufq_release(q->bufq2);
+
+ if (q->bufq1)
+ cpfl_rx_split_bufq_release(q->bufq1);
+
+ rte_free(cpfl_rxq);
+ return;
+ }
+
+ /* Single queue */
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(cpfl_rxq);
+}
+
+static void
+cpfl_tx_queue_release(void *txq)
+{
+ struct cpfl_tx_queue *cpfl_txq = txq;
+ struct idpf_tx_queue *q = NULL;
+
+ if (cpfl_txq == NULL)
+ return;
+
+ q = &cpfl_txq->base;
+
+ if (q->complq) {
+ rte_memzone_free(q->complq->mz);
+ rte_free(q->complq);
+ }
+
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(cpfl_txq);
+}
+
int
cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct idpf_hw *hw = &base->hw;
+ struct cpfl_rx_queue *cpfl_rxq;
const struct rte_memzone *mz;
struct idpf_rx_queue *rxq;
uint16_t rx_free_thresh;
@@ -248,21 +303,23 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Free memory if needed */
if (dev->data->rx_queues[queue_idx] != NULL) {
- idpf_qc_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
dev->data->rx_queues[queue_idx] = NULL;
}
/* Setup Rx queue */
- rxq = rte_zmalloc_socket("cpfl rxq",
- sizeof(struct idpf_rx_queue),
+ cpfl_rxq = rte_zmalloc_socket("cpfl rxq",
+ sizeof(struct cpfl_rx_queue),
RTE_CACHE_LINE_SIZE,
socket_id);
- if (rxq == NULL) {
+ if (cpfl_rxq == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
ret = -ENOMEM;
goto err_rxq_alloc;
}
+ rxq = &cpfl_rxq->base;
+
is_splitq = !!(vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
rxq->mp = mp;
@@ -329,7 +386,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
rxq->q_set = true;
- dev->data->rx_queues[queue_idx] = rxq;
+ dev->data->rx_queues[queue_idx] = cpfl_rxq;
return 0;
@@ -349,7 +406,8 @@ cpfl_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq,
uint16_t queue_idx, uint16_t nb_desc,
unsigned int socket_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
const struct rte_memzone *mz;
struct idpf_tx_queue *cq;
int ret;
@@ -397,9 +455,11 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t tx_rs_thresh, tx_free_thresh;
+ struct cpfl_tx_queue *cpfl_txq;
struct idpf_hw *hw = &base->hw;
const struct rte_memzone *mz;
struct idpf_tx_queue *txq;
@@ -419,21 +479,23 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Free memory if needed. */
if (dev->data->tx_queues[queue_idx] != NULL) {
- idpf_qc_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
dev->data->tx_queues[queue_idx] = NULL;
}
/* Allocate the TX queue data structure. */
- txq = rte_zmalloc_socket("cpfl txq",
- sizeof(struct idpf_tx_queue),
+ cpfl_txq = rte_zmalloc_socket("cpfl txq",
+ sizeof(struct cpfl_tx_queue),
RTE_CACHE_LINE_SIZE,
socket_id);
- if (txq == NULL) {
+ if (cpfl_txq == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
ret = -ENOMEM;
goto err_txq_alloc;
}
+ txq = &cpfl_txq->base;
+
is_splitq = !!(vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
txq->nb_tx_desc = nb_desc;
@@ -487,7 +549,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
queue_idx * vport->chunks_info.tx_qtail_spacing);
txq->ops = &def_txq_ops;
txq->q_set = true;
- dev->data->tx_queues[queue_idx] = txq;
+ dev->data->tx_queues[queue_idx] = cpfl_txq;
return 0;
@@ -503,6 +565,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
+ struct cpfl_rx_queue *cpfl_rxq;
struct idpf_rx_queue *rxq;
uint16_t max_pkt_len;
uint32_t frame_size;
@@ -511,7 +574,8 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
if (rx_queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
- rxq = dev->data->rx_queues[rx_queue_id];
+ cpfl_rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = &cpfl_rxq->base;
if (rxq == NULL || !rxq->q_set) {
PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
@@ -575,9 +639,10 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_rx_queue *rxq =
- dev->data->rx_queues[rx_queue_id];
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq = dev->data->rx_queues[rx_queue_id];
+ struct idpf_rx_queue *rxq = &cpfl_rxq->base;
int err = 0;
err = idpf_vc_rxq_config(vport, rxq);
@@ -610,15 +675,15 @@ cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
if (tx_queue_id >= dev->data->nb_tx_queues)
return -EINVAL;
- txq = dev->data->tx_queues[tx_queue_id];
+ cpfl_txq = dev->data->tx_queues[tx_queue_id];
/* Init the RX tail register. */
- IDPF_PCI_REG_WRITE(txq->qtx_tail, 0);
+ IDPF_PCI_REG_WRITE(cpfl_txq->base.qtx_tail, 0);
return 0;
}
@@ -626,12 +691,13 @@ cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
int
cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_tx_queue *txq =
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_tx_queue *cpfl_txq =
dev->data->tx_queues[tx_queue_id];
int err = 0;
- err = idpf_vc_txq_config(vport, txq);
+ err = idpf_vc_txq_config(vport, &cpfl_txq->base);
if (err != 0) {
PMD_DRV_LOG(ERR, "Fail to configure Tx queue %u", tx_queue_id);
return err;
@@ -650,7 +716,7 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
tx_queue_id);
} else {
- txq->q_started = true;
+ cpfl_txq->base.q_started = true;
dev->data->tx_queue_state[tx_queue_id] =
RTE_ETH_QUEUE_STATE_STARTED;
}
@@ -661,13 +727,16 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
int
cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq;
struct idpf_rx_queue *rxq;
int err;
if (rx_queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
+ cpfl_rxq = dev->data->rx_queues[rx_queue_id];
err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
@@ -675,7 +744,7 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return err;
}
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = &cpfl_rxq->base;
rxq->q_started = false;
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
rxq->ops->release_mbufs(rxq);
@@ -693,13 +762,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_tx_queue *cpfl_txq;
struct idpf_tx_queue *txq;
int err;
if (tx_queue_id >= dev->data->nb_tx_queues)
return -EINVAL;
+ cpfl_txq = dev->data->tx_queues[tx_queue_id];
+
err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
@@ -707,7 +780,7 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
return err;
}
- txq = dev->data->tx_queues[tx_queue_id];
+ txq = &cpfl_txq->base;
txq->q_started = false;
txq->ops->release_mbufs(txq);
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
@@ -724,25 +797,25 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
void
cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- idpf_qc_rx_queue_release(dev->data->rx_queues[qid]);
+ cpfl_rx_queue_release(dev->data->rx_queues[qid]);
}
void
cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- idpf_qc_tx_queue_release(dev->data->tx_queues[qid]);
+ cpfl_tx_queue_release(dev->data->tx_queues[qid]);
}
void
cpfl_stop_queues(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
- struct idpf_tx_queue *txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
int i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- if (rxq == NULL)
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq == NULL)
continue;
if (cpfl_rx_queue_stop(dev, i) != 0)
@@ -750,8 +823,8 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL)
continue;
if (cpfl_tx_queue_stop(dev, i) != 0)
@@ -762,9 +835,10 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
void
cpfl_set_rx_function(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
#ifdef RTE_ARCH_X86
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i;
if (cpfl_rx_vec_dev_check_default(dev) == CPFL_VECTOR_PATH &&
@@ -790,8 +864,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- (void)idpf_qc_splitq_rx_vec_setup(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ (void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
if (vport->rx_use_avx512) {
@@ -810,8 +884,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
} else {
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- (void)idpf_qc_singleq_rx_vec_setup(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ (void)idpf_qc_singleq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
if (vport->rx_use_avx512) {
@@ -860,10 +934,11 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
void
cpfl_set_tx_function(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
#ifdef RTE_ARCH_X86
#ifdef CC_AVX512_SUPPORT
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
int i;
#endif /* CC_AVX512_SUPPORT */
@@ -878,8 +953,8 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
vport->tx_use_avx512 = true;
if (vport->tx_use_avx512) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- idpf_qc_tx_vec_avx512_setup(txq);
+ cpfl_txq = dev->data->tx_queues[i];
+ idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
}
}
}
@@ -916,10 +991,10 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
#ifdef CC_AVX512_SUPPORT
if (vport->tx_use_avx512) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL)
continue;
- idpf_qc_tx_vec_avx512_setup(txq);
+ idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
}
PMD_DRV_LOG(NOTICE,
"Using Single AVX512 Vector Tx (port %d).",
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index fb267d38c8..bfb9ad97bd 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -23,6 +23,14 @@
#define CPFL_SUPPORT_CHAIN_NUM 5
+struct cpfl_rx_queue {
+ struct idpf_rx_queue base;
+};
+
+struct cpfl_tx_queue {
+ struct idpf_tx_queue base;
+};
+
int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
index 665418d27d..5690b17911 100644
--- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
+++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
@@ -76,15 +76,16 @@ cpfl_rx_splitq_vec_default(struct idpf_rx_queue *rxq)
static inline int
cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_rx_queue *rxq;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq;
int i, default_ret, splitq_ret, ret = CPFL_SCALAR_PATH;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- default_ret = cpfl_rx_vec_queue_default(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
- splitq_ret = cpfl_rx_splitq_vec_default(rxq);
+ splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base);
ret = splitq_ret && default_ret;
} else {
ret = default_ret;
@@ -100,12 +101,12 @@ static inline int
cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)
{
int i;
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
int ret = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- ret = cpfl_tx_vec_queue_default(txq);
+ cpfl_txq = dev->data->tx_queues[i];
+ ret = cpfl_tx_vec_queue_default(&cpfl_txq->base);
if (ret == CPFL_SCALAR_PATH)
return CPFL_SCALAR_PATH;
}
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v7 02/14] common/idpf: support queue groups add/delete
2023-05-31 13:04 ` [PATCH v7 00/14] net/cpfl: add hairpin queue support beilei.xing
2023-05-31 13:04 ` [PATCH v7 01/14] net/cpfl: refine structures beilei.xing
@ 2023-05-31 13:04 ` beilei.xing
2023-05-31 13:04 ` [PATCH v7 03/14] net/cpfl: add hairpin queue group during vport init beilei.xing
` (12 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 13:04 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds queue group add/delete virtual channel support.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 66 ++++++++++++++++++++++
drivers/common/idpf/idpf_common_virtchnl.h | 9 +++
drivers/common/idpf/version.map | 2 +
3 files changed, 77 insertions(+)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index b713678634..a3fe55c897 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -359,6 +359,72 @@ idpf_vc_vport_destroy(struct idpf_vport *vport)
return err;
}
+int
+idpf_vc_queue_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
+ uint8_t *p2p_queue_grps_out)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_cmd_info args;
+ int size, qg_info_size;
+ int err = -1;
+
+ size = sizeof(*p2p_queue_grps_info) +
+ (p2p_queue_grps_info->qg_info.num_queue_groups - 1) *
+ sizeof(struct virtchnl2_queue_group_info);
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_ADD_QUEUE_GROUPS;
+ args.in_args = (uint8_t *)p2p_queue_grps_info;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ if (err != 0) {
+ DRV_LOG(ERR,
+ "Failed to execute command of VIRTCHNL2_OP_ADD_QUEUE_GROUPS");
+ return err;
+ }
+
+ rte_memcpy(p2p_queue_grps_out, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
+ return 0;
+}
+
+int idpf_vc_queue_grps_del(struct idpf_vport *vport,
+ uint16_t num_q_grps,
+ struct virtchnl2_queue_group_id *qg_ids)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_delete_queue_groups *vc_del_q_grps;
+ struct idpf_cmd_info args;
+ int size;
+ int err;
+
+ size = sizeof(*vc_del_q_grps) +
+ (num_q_grps - 1) * sizeof(struct virtchnl2_queue_group_id);
+ vc_del_q_grps = rte_zmalloc("vc_del_q_grps", size, 0);
+
+ vc_del_q_grps->vport_id = vport->vport_id;
+ vc_del_q_grps->num_queue_groups = num_q_grps;
+ memcpy(vc_del_q_grps->qg_ids, qg_ids,
+ num_q_grps * sizeof(struct virtchnl2_queue_group_id));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_DEL_QUEUE_GROUPS;
+ args.in_args = (uint8_t *)vc_del_q_grps;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_DEL_QUEUE_GROUPS");
+
+ rte_free(vc_del_q_grps);
+ return err;
+}
+
int
idpf_vc_rss_key_set(struct idpf_vport *vport)
{
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index c45295290e..58b16e1c5d 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -64,4 +64,13 @@ int idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
__rte_internal
int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 *buff_count, struct idpf_dma_mem **buffs);
+__rte_internal
+int idpf_vc_queue_grps_del(struct idpf_vport *vport,
+ uint16_t num_q_grps,
+ struct virtchnl2_queue_group_id *qg_ids);
+__rte_internal
+int
+idpf_vc_queue_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *ptp_queue_grps_info,
+ uint8_t *ptp_queue_grps_out);
#endif /* _IDPF_COMMON_VIRTCHNL_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 70334a1b03..01d18f3f3f 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -43,6 +43,8 @@ INTERNAL {
idpf_vc_irq_map_unmap_config;
idpf_vc_one_msg_read;
idpf_vc_ptype_info_query;
+ idpf_vc_queue_grps_add;
+ idpf_vc_queue_grps_del;
idpf_vc_queue_switch;
idpf_vc_queues_ena_dis;
idpf_vc_rss_hash_get;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v7 03/14] net/cpfl: add hairpin queue group during vport init
2023-05-31 13:04 ` [PATCH v7 00/14] net/cpfl: add hairpin queue support beilei.xing
2023-05-31 13:04 ` [PATCH v7 01/14] net/cpfl: refine structures beilei.xing
2023-05-31 13:04 ` [PATCH v7 02/14] common/idpf: support queue groups add/delete beilei.xing
@ 2023-05-31 13:04 ` beilei.xing
2023-05-31 13:04 ` [PATCH v7 04/14] net/cpfl: support hairpin queue capability get beilei.xing
` (11 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 13:04 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds hairpin queue group setup during vport init.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 133 +++++++++++++++++++++++++++++++++
drivers/net/cpfl/cpfl_ethdev.h | 18 +++++
drivers/net/cpfl/cpfl_rxtx.h | 7 ++
3 files changed, 158 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index e587155db6..c1273a7478 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -840,6 +840,20 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
return 0;
}
+static int
+cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
+{
+ struct virtchnl2_queue_group_id qg_ids[CPFL_P2P_NB_QUEUE_GRPS] = {0};
+ int ret = 0;
+
+ qg_ids[0].queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
+ qg_ids[0].queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
+ ret = idpf_vc_queue_grps_del(vport, CPFL_P2P_NB_QUEUE_GRPS, qg_ids);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to delete p2p queue groups");
+ return ret;
+}
+
static int
cpfl_dev_close(struct rte_eth_dev *dev)
{
@@ -848,7 +862,12 @@ cpfl_dev_close(struct rte_eth_dev *dev)
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
+
+ if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
+ cpfl_p2p_queue_grps_del(vport);
+
idpf_vport_deinit(vport);
+ rte_free(cpfl_vport->p2p_q_chunks_info);
adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
adapter->cur_vport_nb--;
@@ -1284,6 +1303,96 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter)
return vport_idx;
}
+static int
+cpfl_p2p_q_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
+ uint8_t *p2p_q_vc_out_info)
+{
+ int ret;
+
+ p2p_queue_grps_info->vport_id = vport->vport_id;
+ p2p_queue_grps_info->qg_info.num_queue_groups = CPFL_P2P_NB_QUEUE_GRPS;
+ p2p_queue_grps_info->qg_info.groups[0].num_rx_q = CPFL_MAX_P2P_NB_QUEUES;
+ p2p_queue_grps_info->qg_info.groups[0].num_rx_bufq = CPFL_P2P_NB_RX_BUFQ;
+ p2p_queue_grps_info->qg_info.groups[0].num_tx_q = CPFL_MAX_P2P_NB_QUEUES;
+ p2p_queue_grps_info->qg_info.groups[0].num_tx_complq = CPFL_P2P_NB_TX_COMPLQ;
+ p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
+ p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
+ p2p_queue_grps_info->qg_info.groups[0].rx_q_grp_info.rss_lut_size = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.tx_tc = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.priority = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.is_sp = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.pir_weight = 0;
+
+ ret = idpf_vc_queue_grps_add(vport, p2p_queue_grps_info, p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to add p2p queue groups.");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
+ struct virtchnl2_add_queue_groups *p2p_q_vc_out_info)
+{
+ struct p2p_queue_chunks_info *p2p_q_chunks_info = cpfl_vport->p2p_q_chunks_info;
+ struct virtchnl2_queue_reg_chunks *vc_chunks_out;
+ int i, type;
+
+ if (p2p_q_vc_out_info->qg_info.groups[0].qg_id.queue_group_type !=
+ VIRTCHNL2_QUEUE_GROUP_P2P) {
+ PMD_DRV_LOG(ERR, "Add queue group response mismatch.");
+ return -EINVAL;
+ }
+
+ vc_chunks_out = &p2p_q_vc_out_info->qg_info.groups[0].chunks;
+
+ for (i = 0; i < vc_chunks_out->num_chunks; i++) {
+ type = vc_chunks_out->chunks[i].type;
+ switch (type) {
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ p2p_q_chunks_info->tx_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->tx_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->tx_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ p2p_q_chunks_info->rx_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->rx_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->rx_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ p2p_q_chunks_info->tx_compl_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->tx_compl_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->tx_compl_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ p2p_q_chunks_info->rx_buf_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->rx_buf_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->rx_buf_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported queue type");
+ break;
+ }
+ }
+
+ return 0;
+}
+
static int
cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
{
@@ -1293,6 +1402,8 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
struct cpfl_adapter_ext *adapter = param->adapter;
/* for sending create vport virtchnl msg prepare */
struct virtchnl2_create_vport create_vport_info;
+ struct virtchnl2_add_queue_groups p2p_queue_grps_info;
+ uint8_t p2p_q_vc_out_info[IDPF_DFLT_MBX_BUF_SIZE] = {0};
int ret = 0;
dev->dev_ops = &cpfl_eth_dev_ops;
@@ -1327,6 +1438,28 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
&dev->data->mac_addrs[0]);
+ if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
+ memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
+ ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to add p2p queue group.");
+ return 0;
+ }
+ cpfl_vport->p2p_q_chunks_info = rte_zmalloc(NULL,
+ sizeof(struct p2p_queue_chunks_info), 0);
+ if (cpfl_vport->p2p_q_chunks_info == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate p2p queue info.");
+ cpfl_p2p_queue_grps_del(vport);
+ return 0;
+ }
+ ret = cpfl_p2p_queue_info_init(cpfl_vport,
+ (struct virtchnl2_add_queue_groups *)p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to init p2p queue info.");
+ cpfl_p2p_queue_grps_del(vport);
+ }
+ }
+
return 0;
err_mac_addrs:
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 81fe9ac4c3..666d46a44a 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -56,6 +56,7 @@
/* Device IDs */
#define IDPF_DEV_ID_CPF 0x1453
+#define VIRTCHNL2_QUEUE_GROUP_P2P 0x100
struct cpfl_vport_param {
struct cpfl_adapter_ext *adapter;
@@ -69,8 +70,25 @@ struct cpfl_devargs {
uint16_t req_vport_nb;
};
+struct p2p_queue_chunks_info {
+ uint32_t tx_start_qid;
+ uint32_t rx_start_qid;
+ uint32_t tx_compl_start_qid;
+ uint32_t rx_buf_start_qid;
+
+ uint64_t tx_qtail_start;
+ uint32_t tx_qtail_spacing;
+ uint64_t rx_qtail_start;
+ uint32_t rx_qtail_spacing;
+ uint64_t tx_compl_qtail_start;
+ uint32_t tx_compl_qtail_spacing;
+ uint64_t rx_buf_qtail_start;
+ uint32_t rx_buf_qtail_spacing;
+};
+
struct cpfl_vport {
struct idpf_vport base;
+ struct p2p_queue_chunks_info *p2p_q_chunks_info;
};
struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index bfb9ad97bd..1fe65778f0 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -13,6 +13,13 @@
#define CPFL_MIN_RING_DESC 32
#define CPFL_MAX_RING_DESC 4096
#define CPFL_DMA_MEM_ALIGN 4096
+
+#define CPFL_MAX_P2P_NB_QUEUES 16
+#define CPFL_P2P_NB_RX_BUFQ 1
+#define CPFL_P2P_NB_TX_COMPLQ 1
+#define CPFL_P2P_NB_QUEUE_GRPS 1
+#define CPFL_P2P_QUEUE_GRP_ID 1
+
/* Base address of the HW descriptor ring should be 128B aligned. */
#define CPFL_RING_BASE_ALIGN 128
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v7 04/14] net/cpfl: support hairpin queue capability get
2023-05-31 13:04 ` [PATCH v7 00/14] net/cpfl: add hairpin queue support beilei.xing
` (2 preceding siblings ...)
2023-05-31 13:04 ` [PATCH v7 03/14] net/cpfl: add hairpin queue group during vport init beilei.xing
@ 2023-05-31 13:04 ` beilei.xing
2023-05-31 13:04 ` [PATCH v7 05/14] net/cpfl: support hairpin queue setup and release beilei.xing
` (10 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 13:04 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch adds hairpin_cap_get ops support.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 18 ++++++++++++++++++
drivers/net/cpfl/cpfl_rxtx.h | 3 +++
2 files changed, 21 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index c1273a7478..40b4515539 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -154,6 +154,23 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
return rte_eth_linkstatus_set(dev, &new_link);
}
+static int
+cpfl_hairpin_cap_get(struct rte_eth_dev *dev,
+ struct rte_eth_hairpin_cap *cap)
+{
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+
+ if (cpfl_vport->p2p_q_chunks_info == NULL)
+ return -ENOTSUP;
+
+ cap->max_nb_queues = CPFL_MAX_P2P_NB_QUEUES;
+ cap->max_rx_2_tx = CPFL_MAX_HAIRPINQ_RX_2_TX;
+ cap->max_tx_2_rx = CPFL_MAX_HAIRPINQ_TX_2_RX;
+ cap->max_nb_desc = CPFL_MAX_HAIRPINQ_NB_DESC;
+
+ return 0;
+}
+
static int
cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
@@ -904,6 +921,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.xstats_get = cpfl_dev_xstats_get,
.xstats_get_names = cpfl_dev_xstats_get_names,
.xstats_reset = cpfl_dev_xstats_reset,
+ .hairpin_cap_get = cpfl_hairpin_cap_get,
};
static int
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 1fe65778f0..a4a164d462 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -14,6 +14,9 @@
#define CPFL_MAX_RING_DESC 4096
#define CPFL_DMA_MEM_ALIGN 4096
+#define CPFL_MAX_HAIRPINQ_RX_2_TX 1
+#define CPFL_MAX_HAIRPINQ_TX_2_RX 1
+#define CPFL_MAX_HAIRPINQ_NB_DESC 1024
#define CPFL_MAX_P2P_NB_QUEUES 16
#define CPFL_P2P_NB_RX_BUFQ 1
#define CPFL_P2P_NB_TX_COMPLQ 1
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v7 05/14] net/cpfl: support hairpin queue setup and release
2023-05-31 13:04 ` [PATCH v7 00/14] net/cpfl: add hairpin queue support beilei.xing
` (3 preceding siblings ...)
2023-05-31 13:04 ` [PATCH v7 04/14] net/cpfl: support hairpin queue capability get beilei.xing
@ 2023-05-31 13:04 ` beilei.xing
2023-05-31 13:04 ` [PATCH v7 06/14] common/idpf: add queue config API beilei.xing
` (9 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 13:04 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
Support hairpin Rx/Tx queue setup and release.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 6 +
drivers/net/cpfl/cpfl_ethdev.h | 11 +
drivers/net/cpfl/cpfl_rxtx.c | 364 +++++++++++++++++++++++-
drivers/net/cpfl/cpfl_rxtx.h | 36 +++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 4 +
5 files changed, 420 insertions(+), 1 deletion(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 40b4515539..b17c538ec2 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -879,6 +879,10 @@ cpfl_dev_close(struct rte_eth_dev *dev)
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
+ if (cpfl_vport->p2p_mp) {
+ rte_mempool_free(cpfl_vport->p2p_mp);
+ cpfl_vport->p2p_mp = NULL;
+ }
if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
cpfl_p2p_queue_grps_del(vport);
@@ -922,6 +926,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.xstats_get_names = cpfl_dev_xstats_get_names,
.xstats_reset = cpfl_dev_xstats_reset,
.hairpin_cap_get = cpfl_hairpin_cap_get,
+ .rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
+ .tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
};
static int
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 666d46a44a..2e42354f70 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -89,6 +89,17 @@ struct p2p_queue_chunks_info {
struct cpfl_vport {
struct idpf_vport base;
struct p2p_queue_chunks_info *p2p_q_chunks_info;
+
+ struct rte_mempool *p2p_mp;
+
+ uint16_t nb_data_rxq;
+ uint16_t nb_data_txq;
+ uint16_t nb_p2p_rxq;
+ uint16_t nb_p2p_txq;
+
+ struct idpf_rx_queue *p2p_rx_bufq;
+ struct idpf_tx_queue *p2p_tx_complq;
+ bool p2p_manual_bind;
};
struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 04a51b8d15..90b408d1f4 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -10,6 +10,67 @@
#include "cpfl_rxtx.h"
#include "cpfl_rxtx_vec_common.h"
+static inline void
+cpfl_tx_hairpin_descq_reset(struct idpf_tx_queue *txq)
+{
+ uint32_t i, size;
+
+ if (!txq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+ return;
+ }
+
+ size = txq->nb_tx_desc * CPFL_P2P_DESC_LEN;
+ for (i = 0; i < size; i++)
+ ((volatile char *)txq->desc_ring)[i] = 0;
+}
+
+static inline void
+cpfl_tx_hairpin_complq_reset(struct idpf_tx_queue *cq)
+{
+ uint32_t i, size;
+
+ if (!cq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to complq is NULL");
+ return;
+ }
+
+ size = cq->nb_tx_desc * CPFL_P2P_DESC_LEN;
+ for (i = 0; i < size; i++)
+ ((volatile char *)cq->compl_ring)[i] = 0;
+}
+
+static inline void
+cpfl_rx_hairpin_descq_reset(struct idpf_rx_queue *rxq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxq)
+ return;
+
+ len = rxq->nb_rx_desc;
+ for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
+ ((volatile char *)rxq->rx_ring)[i] = 0;
+}
+
+static inline void
+cpfl_rx_hairpin_bufq_reset(struct idpf_rx_queue *rxbq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxbq)
+ return;
+
+ len = rxbq->nb_rx_desc;
+ for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
+ ((volatile char *)rxbq->rx_ring)[i] = 0;
+
+ rxbq->bufq1 = NULL;
+ rxbq->bufq2 = NULL;
+}
+
static uint64_t
cpfl_rx_offload_convert(uint64_t offload)
{
@@ -234,7 +295,10 @@ cpfl_rx_queue_release(void *rxq)
/* Split queue */
if (!q->adapter->is_rx_singleq) {
- if (q->bufq2)
+ /* The mz is shared between Tx/Rx hairpin, so let the Rx release
+ * path free the buf, q->bufq1->mz and q->mz.
+ */
+ if (!cpfl_rxq->hairpin_info.hairpin_q && q->bufq2)
cpfl_rx_split_bufq_release(q->bufq2);
if (q->bufq1)
@@ -385,6 +449,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
}
+ cpfl_vport->nb_data_rxq++;
rxq->q_set = true;
dev->data->rx_queues[queue_idx] = cpfl_rxq;
@@ -548,6 +613,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
queue_idx * vport->chunks_info.tx_qtail_spacing);
txq->ops = &def_txq_ops;
+ cpfl_vport->nb_data_txq++;
txq->q_set = true;
dev->data->tx_queues[queue_idx] = cpfl_txq;
@@ -562,6 +628,300 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return ret;
}
+static int
+cpfl_rx_hairpin_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,
+ uint16_t logic_qid, uint16_t nb_desc)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct rte_mempool *mp;
+ char pool_name[RTE_MEMPOOL_NAMESIZE];
+
+ mp = cpfl_vport->p2p_mp;
+ if (!mp) {
+ snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "p2p_mb_pool_%u",
+ dev->data->port_id);
+ mp = rte_pktmbuf_pool_create(pool_name, CPFL_P2P_NB_MBUF * CPFL_MAX_P2P_NB_QUEUES,
+ CPFL_P2P_CACHE_SIZE, 0, CPFL_P2P_MBUF_SIZE,
+ dev->device->numa_node);
+ if (!mp) {
+ PMD_INIT_LOG(ERR, "Failed to allocate mbuf pool for p2p");
+ return -ENOMEM;
+ }
+ cpfl_vport->p2p_mp = mp;
+ }
+
+ bufq->mp = mp;
+ bufq->nb_rx_desc = nb_desc;
+ bufq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_buf_start_qid,
+ logic_qid);
+ bufq->port_id = dev->data->port_id;
+ bufq->adapter = adapter;
+ bufq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
+
+ bufq->q_set = true;
+ bufq->ops = &def_rxq_ops;
+
+ return 0;
+}
+
+int
+cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf)
+{
+ struct cpfl_vport *cpfl_vport = (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter_base = vport->adapter;
+ uint16_t logic_qid = cpfl_vport->nb_p2p_rxq;
+ struct cpfl_rxq_hairpin_info *hairpin_info;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct idpf_rx_queue *bufq1 = NULL;
+ struct idpf_rx_queue *rxq;
+ uint16_t peer_port, peer_q;
+ uint16_t qid;
+ int ret;
+
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin queue.");
+ return -EINVAL;
+ }
+
+ if (conf->peer_count != 1) {
+ PMD_INIT_LOG(ERR, "Can't support Rx hairpin queue peer count %d", conf->peer_count);
+ return -EINVAL;
+ }
+
+ peer_port = conf->peers[0].port;
+ peer_q = conf->peers[0].queue;
+
+ if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
+ nb_desc > CPFL_MAX_RING_DESC ||
+ nb_desc < CPFL_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is invalid", nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory if needed */
+ if (dev->data->rx_queues[queue_idx]) {
+ cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* Setup Rx description queue */
+ cpfl_rxq = rte_zmalloc_socket("cpfl hairpin rxq",
+ sizeof(struct cpfl_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!cpfl_rxq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
+ return -ENOMEM;
+ }
+
+ rxq = &cpfl_rxq->base;
+ hairpin_info = &cpfl_rxq->hairpin_info;
+ rxq->nb_rx_desc = nb_desc * 2;
+ rxq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid, logic_qid);
+ rxq->port_id = dev->data->port_id;
+ rxq->adapter = adapter_base;
+ rxq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
+ hairpin_info->hairpin_q = true;
+ hairpin_info->peer_txp = peer_port;
+ hairpin_info->peer_txq_id = peer_q;
+
+ if (conf->manual_bind != 0)
+ cpfl_vport->p2p_manual_bind = true;
+ else
+ cpfl_vport->p2p_manual_bind = false;
+
+ if (cpfl_vport->p2p_rx_bufq == NULL) {
+ bufq1 = rte_zmalloc_socket("hairpin rx bufq1",
+ sizeof(struct idpf_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!bufq1) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for hairpin Rx buffer queue 1.");
+ ret = -ENOMEM;
+ goto err_alloc_bufq1;
+ }
+ qid = 2 * logic_qid;
+ ret = cpfl_rx_hairpin_bufq_setup(dev, bufq1, qid, nb_desc);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to setup hairpin Rx buffer queue 1");
+ ret = -EINVAL;
+ goto err_setup_bufq1;
+ }
+ cpfl_vport->p2p_rx_bufq = bufq1;
+ }
+
+ rxq->bufq1 = cpfl_vport->p2p_rx_bufq;
+ rxq->bufq2 = NULL;
+
+ cpfl_vport->nb_p2p_rxq++;
+ rxq->q_set = true;
+ dev->data->rx_queues[queue_idx] = cpfl_rxq;
+
+ return 0;
+
+err_setup_bufq1:
+ rte_mempool_free(cpfl_vport->p2p_mp);
+ rte_free(bufq1);
+err_alloc_bufq1:
+ rte_free(cpfl_rxq);
+
+ return ret;
+}
+
+int
+cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter_base = vport->adapter;
+ uint16_t logic_qid = cpfl_vport->nb_p2p_txq;
+ struct cpfl_txq_hairpin_info *hairpin_info;
+ struct idpf_hw *hw = &adapter_base->hw;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct idpf_tx_queue *txq, *cq;
+ const struct rte_memzone *mz;
+ uint32_t ring_size;
+ uint16_t peer_port, peer_q;
+ int ret;
+
+ if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin queue.");
+ return -EINVAL;
+ }
+
+ if (conf->peer_count != 1) {
+ PMD_INIT_LOG(ERR, "Can't support Tx hairpin queue peer count %d", conf->peer_count);
+ return -EINVAL;
+ }
+
+ peer_port = conf->peers[0].port;
+ peer_q = conf->peers[0].queue;
+
+ if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
+ nb_desc > CPFL_MAX_RING_DESC ||
+ nb_desc < CPFL_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is invalid",
+ nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory if needed. */
+ if (dev->data->tx_queues[queue_idx]) {
+ cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the TX queue data structure. */
+ cpfl_txq = rte_zmalloc_socket("cpfl hairpin txq",
+ sizeof(struct cpfl_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!cpfl_txq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+ return -ENOMEM;
+ }
+
+ txq = &cpfl_txq->base;
+ hairpin_info = &cpfl_txq->hairpin_info;
+ /* Txq ring length should be 2 times the Tx completion queue size. */
+ txq->nb_tx_desc = nb_desc * 2;
+ txq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_start_qid, logic_qid);
+ txq->port_id = dev->data->port_id;
+ hairpin_info->hairpin_q = true;
+ hairpin_info->peer_rxp = peer_port;
+ hairpin_info->peer_rxq_id = peer_q;
+
+ if (conf->manual_bind != 0)
+ cpfl_vport->p2p_manual_bind = true;
+ else
+ cpfl_vport->p2p_manual_bind = false;
+
+ /* Always Tx hairpin queue allocates Tx HW ring */
+ ring_size = RTE_ALIGN(txq->nb_tx_desc * CPFL_P2P_DESC_LEN,
+ CPFL_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_ring", logic_qid,
+ ring_size + CPFL_P2P_RING_BUF,
+ CPFL_RING_BASE_ALIGN,
+ dev->device->numa_node);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
+ ret = -ENOMEM;
+ goto err_txq_mz_rsv;
+ }
+
+ txq->tx_ring_phys_addr = mz->iova;
+ txq->desc_ring = mz->addr;
+ txq->mz = mz;
+
+ cpfl_tx_hairpin_descq_reset(txq);
+ txq->qtx_tail = hw->hw_addr +
+ cpfl_hw_qtail_get(cpfl_vport->p2p_q_chunks_info->tx_qtail_start,
+ logic_qid, cpfl_vport->p2p_q_chunks_info->tx_qtail_spacing);
+ txq->ops = &def_txq_ops;
+
+ if (cpfl_vport->p2p_tx_complq == NULL) {
+ cq = rte_zmalloc_socket("cpfl hairpin cq",
+ sizeof(struct idpf_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ dev->device->numa_node);
+ if (!cq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+ ret = -ENOMEM;
+ goto err_cq_alloc;
+ }
+
+ cq->nb_tx_desc = nb_desc;
+ cq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_compl_start_qid,
+ 0);
+ cq->port_id = dev->data->port_id;
+
+ /* Tx completion queue always allocates the HW ring */
+ ring_size = RTE_ALIGN(cq->nb_tx_desc * CPFL_P2P_DESC_LEN,
+ CPFL_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_compl_ring", logic_qid,
+ ring_size + CPFL_P2P_RING_BUF,
+ CPFL_RING_BASE_ALIGN,
+ dev->device->numa_node);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX completion queue");
+ ret = -ENOMEM;
+ goto err_cq_mz_rsv;
+ }
+ cq->tx_ring_phys_addr = mz->iova;
+ cq->compl_ring = mz->addr;
+ cq->mz = mz;
+
+ cpfl_tx_hairpin_complq_reset(cq);
+ cpfl_vport->p2p_tx_complq = cq;
+ }
+
+ txq->complq = cpfl_vport->p2p_tx_complq;
+
+ cpfl_vport->nb_p2p_txq++;
+ txq->q_set = true;
+ dev->data->tx_queues[queue_idx] = cpfl_txq;
+
+ return 0;
+
+err_cq_mz_rsv:
+ rte_free(cq);
+err_cq_alloc:
+ cpfl_dma_zone_release(mz);
+err_txq_mz_rsv:
+ rte_free(cpfl_txq);
+ return ret;
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
@@ -865,6 +1225,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ continue;
(void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index a4a164d462..06198d4aad 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -22,6 +22,11 @@
#define CPFL_P2P_NB_TX_COMPLQ 1
#define CPFL_P2P_NB_QUEUE_GRPS 1
#define CPFL_P2P_QUEUE_GRP_ID 1
+#define CPFL_P2P_DESC_LEN 16
+#define CPFL_P2P_NB_MBUF 4096
+#define CPFL_P2P_CACHE_SIZE 250
+#define CPFL_P2P_MBUF_SIZE 2048
+#define CPFL_P2P_RING_BUF 128
/* Base address of the HW descriptor ring should be 128B aligned. */
#define CPFL_RING_BASE_ALIGN 128
@@ -33,14 +38,40 @@
#define CPFL_SUPPORT_CHAIN_NUM 5
+struct cpfl_rxq_hairpin_info {
+ bool hairpin_q; /* if rx queue is a hairpin queue */
+ uint16_t peer_txp;
+ uint16_t peer_txq_id;
+};
+
struct cpfl_rx_queue {
struct idpf_rx_queue base;
+ struct cpfl_rxq_hairpin_info hairpin_info;
+};
+
+struct cpfl_txq_hairpin_info {
+ bool hairpin_q; /* if tx queue is a hairpin queue */
+ uint16_t peer_rxp;
+ uint16_t peer_rxq_id;
};
struct cpfl_tx_queue {
struct idpf_tx_queue base;
+ struct cpfl_txq_hairpin_info hairpin_info;
};
+static inline uint16_t
+cpfl_hw_qid_get(uint16_t start_qid, uint16_t offset)
+{
+ return start_qid + offset;
+}
+
+static inline uint64_t
+cpfl_hw_qtail_get(uint64_t tail_start, uint16_t offset, uint64_t tail_spacing)
+{
+ return tail_start + offset * tail_spacing;
+}
+
int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
@@ -59,4 +90,9 @@ void cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void cpfl_set_rx_function(struct rte_eth_dev *dev);
void cpfl_set_tx_function(struct rte_eth_dev *dev);
+int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, const struct rte_eth_hairpin_conf *conf);
+int cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf);
#endif /* _CPFL_RXTX_H_ */
diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
index 5690b17911..d8e9191196 100644
--- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
+++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
@@ -85,6 +85,8 @@ cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)
cpfl_rxq = dev->data->rx_queues[i];
default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ continue;
splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base);
ret = splitq_ret && default_ret;
} else {
@@ -106,6 +108,8 @@ cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_tx_queues; i++) {
cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq->hairpin_info.hairpin_q)
+ continue;
ret = cpfl_tx_vec_queue_default(&cpfl_txq->base);
if (ret == CPFL_SCALAR_PATH)
return CPFL_SCALAR_PATH;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v7 06/14] common/idpf: add queue config API
2023-05-31 13:04 ` [PATCH v7 00/14] net/cpfl: add hairpin queue support beilei.xing
` (4 preceding siblings ...)
2023-05-31 13:04 ` [PATCH v7 05/14] net/cpfl: support hairpin queue setup and release beilei.xing
@ 2023-05-31 13:04 ` beilei.xing
2023-05-31 13:04 ` [PATCH v7 07/14] net/cpfl: support hairpin queue configuration beilei.xing
` (8 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 13:04 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds the Rx/Tx queue configuration APIs idpf_vc_rxq_config_by_info() and idpf_vc_txq_config_by_info().
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 70 ++++++++++++++++++++++
drivers/common/idpf/idpf_common_virtchnl.h | 6 ++
drivers/common/idpf/version.map | 2 +
3 files changed, 78 insertions(+)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index a3fe55c897..211b44a88e 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -1050,6 +1050,41 @@ idpf_vc_rxq_config(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
return err;
}
+int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct virtchnl2_rxq_info *rxq_info,
+ uint16_t num_qs)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+ struct idpf_cmd_info args;
+ int size, err, i;
+
+ size = sizeof(*vc_rxqs) + (num_qs - 1) *
+ sizeof(struct virtchnl2_rxq_info);
+ vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+ if (vc_rxqs == NULL) {
+ DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_rxqs->vport_id = vport->vport_id;
+ vc_rxqs->num_qinfo = num_qs;
+ memcpy(vc_rxqs->qinfo, rxq_info, num_qs * sizeof(struct virtchnl2_rxq_info));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+ args.in_args = (uint8_t *)vc_rxqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ rte_free(vc_rxqs);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+ return err;
+}
+
int
idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
{
@@ -1121,6 +1156,41 @@ idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
return err;
}
+int
+idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
+ uint16_t num_qs)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+ struct idpf_cmd_info args;
+ int size, err;
+
+ size = sizeof(*vc_txqs) + (num_qs - 1) * sizeof(struct virtchnl2_txq_info);
+ vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+ if (vc_txqs == NULL) {
+ DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_txqs->vport_id = vport->vport_id;
+ vc_txqs->num_qinfo = num_qs;
+ memcpy(vc_txqs->qinfo, txq_info, num_qs * sizeof(struct virtchnl2_txq_info));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+ args.in_args = (uint8_t *)vc_txqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ rte_free(vc_txqs);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+ return err;
+}
+
int
idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
struct idpf_ctlq_msg *q_msg)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index 58b16e1c5d..db83761a5e 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -65,6 +65,12 @@ __rte_internal
int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 *buff_count, struct idpf_dma_mem **buffs);
__rte_internal
+int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct virtchnl2_rxq_info *rxq_info,
+ uint16_t num_qs);
+__rte_internal
+int idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
+ uint16_t num_qs);
+__rte_internal
int idpf_vc_queue_grps_del(struct idpf_vport *vport,
uint16_t num_q_grps,
struct virtchnl2_queue_group_id *qg_ids);
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 01d18f3f3f..17e77884ce 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -54,8 +54,10 @@ INTERNAL {
idpf_vc_rss_lut_get;
idpf_vc_rss_lut_set;
idpf_vc_rxq_config;
+ idpf_vc_rxq_config_by_info;
idpf_vc_stats_query;
idpf_vc_txq_config;
+ idpf_vc_txq_config_by_info;
idpf_vc_vectors_alloc;
idpf_vc_vectors_dealloc;
idpf_vc_vport_create;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v7 07/14] net/cpfl: support hairpin queue configuration
2023-05-31 13:04 ` [PATCH v7 00/14] net/cpfl: add hairpin queue support beilei.xing
` (5 preceding siblings ...)
2023-05-31 13:04 ` [PATCH v7 06/14] common/idpf: add queue config API beilei.xing
@ 2023-05-31 13:04 ` beilei.xing
2023-05-31 13:04 ` [PATCH v7 08/14] common/idpf: add switch queue API beilei.xing
` (7 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 13:04 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports Rx/Tx hairpin queue configuration.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 136 +++++++++++++++++++++++++++++++--
drivers/net/cpfl/cpfl_rxtx.c | 80 +++++++++++++++++++
drivers/net/cpfl/cpfl_rxtx.h | 7 ++
3 files changed, 217 insertions(+), 6 deletions(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index b17c538ec2..a06def06d0 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -742,33 +742,157 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
return idpf_vport_irq_map_config(vport, nb_rx_queues);
}
+/* Update hairpin_info for dev's tx hairpin queue */
+static int
+cpfl_txq_hairpin_info_update(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+ struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+ struct cpfl_txq_hairpin_info *hairpin_info;
+ struct cpfl_tx_queue *cpfl_txq;
+ int i;
+
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ hairpin_info = &cpfl_txq->hairpin_info;
+ if (hairpin_info->peer_rxp != rx_port) {
+ PMD_DRV_LOG(ERR, "port %d is not the peer port", rx_port);
+ return -EINVAL;
+ }
+ hairpin_info->peer_rxq_id =
+ cpfl_hw_qid_get(cpfl_rx_vport->p2p_q_chunks_info->rx_start_qid,
+ hairpin_info->peer_rxq_id - cpfl_rx_vport->nb_data_rxq);
+ }
+
+ return 0;
+}
+
+/* Bind Rx hairpin queue's memory zone to peer Tx hairpin queue's memory zone */
+static void
+cpfl_rxq_hairpin_mz_bind(struct rte_eth_dev *dev)
+{
+ struct cpfl_vport *cpfl_rx_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_rx_vport->base;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_hw *hw = &adapter->hw;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct rte_eth_dev *peer_dev;
+ const struct rte_memzone *mz;
+ uint16_t peer_tx_port;
+ uint16_t peer_tx_qid;
+ int i;
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ peer_tx_port = cpfl_rxq->hairpin_info.peer_txp;
+ peer_tx_qid = cpfl_rxq->hairpin_info.peer_txq_id;
+ peer_dev = &rte_eth_devices[peer_tx_port];
+ cpfl_txq = peer_dev->data->tx_queues[peer_tx_qid];
+
+ /* bind rx queue */
+ mz = cpfl_txq->base.mz;
+ cpfl_rxq->base.rx_ring_phys_addr = mz->iova;
+ cpfl_rxq->base.rx_ring = mz->addr;
+ cpfl_rxq->base.mz = mz;
+
+ /* bind rx buffer queue */
+ mz = cpfl_txq->base.complq->mz;
+ cpfl_rxq->base.bufq1->rx_ring_phys_addr = mz->iova;
+ cpfl_rxq->base.bufq1->rx_ring = mz->addr;
+ cpfl_rxq->base.bufq1->mz = mz;
+ cpfl_rxq->base.bufq1->qrx_tail = hw->hw_addr +
+ cpfl_hw_qtail_get(cpfl_rx_vport->p2p_q_chunks_info->rx_buf_qtail_start,
+ 0, cpfl_rx_vport->p2p_q_chunks_info->rx_buf_qtail_spacing);
+ }
+}
+
static int
cpfl_start_queues(struct rte_eth_dev *dev)
{
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_rx_queue *cpfl_rxq;
struct cpfl_tx_queue *cpfl_txq;
+ int update_flag = 0;
int err = 0;
int i;
+ /* For normal data queues, configure, init and enable Txq.
+ * For non-manual bind hairpin queues, configure Txq.
+ */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
cpfl_txq = dev->data->tx_queues[i];
if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
continue;
- err = cpfl_tx_queue_start(dev, i);
+ if (!cpfl_txq->hairpin_info.hairpin_q) {
+ err = cpfl_tx_queue_start(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+ return err;
+ }
+ } else if (!cpfl_vport->p2p_manual_bind) {
+ if (update_flag == 0) {
+ err = cpfl_txq_hairpin_info_update(dev,
+ cpfl_txq->hairpin_info.peer_rxp);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info");
+ return err;
+ }
+ update_flag = 1;
+ }
+ err = cpfl_hairpin_txq_config(vport, cpfl_txq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+ return err;
+ }
+ }
+ }
+
+ /* For non-manual bind hairpin queues, configure Tx completion queue first.*/
+ if (!cpfl_vport->p2p_manual_bind && cpfl_vport->p2p_tx_complq != NULL) {
+ err = cpfl_hairpin_tx_complq_config(cpfl_vport);
if (err != 0) {
- PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+ PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
return err;
}
}
+ /* For non-manual bind hairpin queues, configure Rx buffer queue.*/
+ if (!cpfl_vport->p2p_manual_bind && cpfl_vport->p2p_rx_bufq != NULL) {
+ cpfl_rxq_hairpin_mz_bind(dev);
+ err = cpfl_hairpin_rx_bufq_config(cpfl_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
+ return err;
+ }
+ }
+
+ /* For normal data queues, configure, init and enable Rxq.
+ * For non-manual bind hairpin queues, configure Rxq, and then init Rxq.
+ */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
continue;
- err = cpfl_rx_queue_start(dev, i);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
- return err;
+ if (!cpfl_rxq->hairpin_info.hairpin_q) {
+ err = cpfl_rx_queue_start(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
+ return err;
+ }
+ } else if (!cpfl_vport->p2p_manual_bind) {
+ err = cpfl_hairpin_rxq_config(vport, cpfl_rxq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
+ return err;
+ }
+ err = cpfl_rx_queue_init(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+ return err;
+ }
}
}
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 90b408d1f4..9408c6e1a4 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -922,6 +922,86 @@ cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return ret;
}
+int
+cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport)
+{
+ struct idpf_rx_queue *rx_bufq = cpfl_vport->p2p_rx_bufq;
+ struct virtchnl2_rxq_info rxq_info[1] = {0};
+
+ rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ rxq_info[0].queue_id = rx_bufq->queue_id;
+ rxq_info[0].ring_len = rx_bufq->nb_rx_desc;
+ rxq_info[0].dma_ring_addr = rx_bufq->rx_ring_phys_addr;
+ rxq_info[0].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info[0].rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
+ rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ rxq_info[0].data_buffer_size = rx_bufq->rx_buf_len;
+ rxq_info[0].buffer_notif_stride = CPFL_RX_BUF_STRIDE;
+
+ return idpf_vc_rxq_config_by_info(&cpfl_vport->base, rxq_info, 1);
+}
+
+int
+cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq)
+{
+ struct virtchnl2_rxq_info rxq_info[1] = {0};
+ struct idpf_rx_queue *rxq = &cpfl_rxq->base;
+
+ rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX;
+ rxq_info[0].queue_id = rxq->queue_id;
+ rxq_info[0].ring_len = rxq->nb_rx_desc;
+ rxq_info[0].dma_ring_addr = rxq->rx_ring_phys_addr;
+ rxq_info[0].rx_bufq1_id = rxq->bufq1->queue_id;
+ rxq_info[0].max_pkt_size = vport->max_pkt_len;
+ rxq_info[0].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info[0].qflags |= VIRTCHNL2_RX_DESC_SIZE_16BYTE;
+
+ rxq_info[0].data_buffer_size = rxq->rx_buf_len;
+ rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ rxq_info[0].rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
+
+ PMD_DRV_LOG(NOTICE, "hairpin: vport %u, Rxq id 0x%x",
+ vport->vport_id, rxq_info[0].queue_id);
+
+ return idpf_vc_rxq_config_by_info(vport, rxq_info, 1);
+}
+
+int
+cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport)
+{
+ struct idpf_tx_queue *tx_complq = cpfl_vport->p2p_tx_complq;
+ struct virtchnl2_txq_info txq_info[1] = {0};
+
+ txq_info[0].dma_ring_addr = tx_complq->tx_ring_phys_addr;
+ txq_info[0].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ txq_info[0].queue_id = tx_complq->queue_id;
+ txq_info[0].ring_len = tx_complq->nb_tx_desc;
+ txq_info[0].peer_rx_queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+ txq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info[0].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+
+ return idpf_vc_txq_config_by_info(&cpfl_vport->base, txq_info, 1);
+}
+
+int
+cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq)
+{
+ struct idpf_tx_queue *txq = &cpfl_txq->base;
+ struct virtchnl2_txq_info txq_info[1] = {0};
+
+ txq_info[0].dma_ring_addr = txq->tx_ring_phys_addr;
+ txq_info[0].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ txq_info[0].queue_id = txq->queue_id;
+ txq_info[0].ring_len = txq->nb_tx_desc;
+ txq_info[0].tx_compl_queue_id = txq->complq->queue_id;
+ txq_info[0].relative_queue_id = txq->queue_id;
+ txq_info[0].peer_rx_queue_id = cpfl_txq->hairpin_info.peer_rxq_id;
+ txq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info[0].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+
+ return idpf_vc_txq_config_by_info(vport, txq_info, 1);
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 06198d4aad..872ebc1bfd 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -32,12 +32,15 @@
#define CPFL_RING_BASE_ALIGN 128
#define CPFL_DEFAULT_RX_FREE_THRESH 32
+#define CPFL_RXBUF_LOW_WATERMARK 64
#define CPFL_DEFAULT_TX_RS_THRESH 32
#define CPFL_DEFAULT_TX_FREE_THRESH 32
#define CPFL_SUPPORT_CHAIN_NUM 5
+#define CPFL_RX_BUF_STRIDE 64
+
struct cpfl_rxq_hairpin_info {
bool hairpin_q; /* if rx queue is a hairpin queue */
uint16_t peer_txp;
@@ -95,4 +98,8 @@ int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
int cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc,
const struct rte_eth_hairpin_conf *conf);
+int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport);
+int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq);
+int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
+int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
#endif /* _CPFL_RXTX_H_ */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v7 08/14] common/idpf: add switch queue API
2023-05-31 13:04 ` [PATCH v7 00/14] net/cpfl: add hairpin queue support beilei.xing
` (6 preceding siblings ...)
2023-05-31 13:04 ` [PATCH v7 07/14] net/cpfl: support hairpin queue configuration beilei.xing
@ 2023-05-31 13:04 ` beilei.xing
2023-05-31 13:04 ` [PATCH v7 09/14] net/cpfl: support hairpin queue start/stop beilei.xing
` (6 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 13:04 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch exposes the existing idpf_vc_ena_dis_one_queue() helper as an internal API so that a single queue can be enabled or disabled by its id and type.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 2 +-
drivers/common/idpf/idpf_common_virtchnl.h | 3 +++
drivers/common/idpf/version.map | 1 +
3 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index 211b44a88e..6455f640da 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -733,7 +733,7 @@ idpf_vc_vectors_dealloc(struct idpf_vport *vport)
return err;
}
-static int
+int
idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
uint32_t type, bool on)
{
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index db83761a5e..9ff5c38c26 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -71,6 +71,9 @@ __rte_internal
int idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
uint16_t num_qs);
__rte_internal
+int idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
+ uint32_t type, bool on);
+__rte_internal
int idpf_vc_queue_grps_del(struct idpf_vport *vport,
uint16_t num_q_grps,
struct virtchnl2_queue_group_id *qg_ids);
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 17e77884ce..25624732b0 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -40,6 +40,7 @@ INTERNAL {
idpf_vc_cmd_execute;
idpf_vc_ctlq_post_rx_buffs;
idpf_vc_ctlq_recv;
+ idpf_vc_ena_dis_one_queue;
idpf_vc_irq_map_unmap_config;
idpf_vc_one_msg_read;
idpf_vc_ptype_info_query;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v7 09/14] net/cpfl: support hairpin queue start/stop
2023-05-31 13:04 ` [PATCH v7 00/14] net/cpfl: add hairpin queue support beilei.xing
` (7 preceding siblings ...)
2023-05-31 13:04 ` [PATCH v7 08/14] common/idpf: add switch queue API beilei.xing
@ 2023-05-31 13:04 ` beilei.xing
2023-05-31 13:04 ` [PATCH v7 10/14] common/idpf: add irq map config API beilei.xing
` (5 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 13:04 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports Rx/Tx hairpin queue start/stop.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 46 +++++++++
drivers/net/cpfl/cpfl_rxtx.c | 164 +++++++++++++++++++++++++++++----
drivers/net/cpfl/cpfl_rxtx.h | 15 +++
3 files changed, 207 insertions(+), 18 deletions(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index a06def06d0..2b99e58341 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -896,6 +896,52 @@ cpfl_start_queues(struct rte_eth_dev *dev)
}
}
+ /* For non-manual bind hairpin queues, enable Tx queue and Rx queue,
+ * then enable Tx completion queue and Rx buffer queue.
+ */
+ for (i = cpfl_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) {
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ i - cpfl_vport->nb_data_txq,
+ false, true);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on",
+ i);
+ else
+ cpfl_txq->base.q_started = true;
+ }
+ }
+
+ for (i = cpfl_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) {
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ i - cpfl_vport->nb_data_rxq,
+ true, true);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on",
+ i);
+ else
+ cpfl_rxq->base.q_started = true;
+ }
+ }
+
+ if (!cpfl_vport->p2p_manual_bind &&
+ cpfl_vport->p2p_tx_complq != NULL &&
+ cpfl_vport->p2p_rx_bufq != NULL) {
+ err = cpfl_switch_hairpin_complq(cpfl_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq");
+ return err;
+ }
+ err = cpfl_switch_hairpin_bufq(cpfl_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Rx bufq");
+ return err;
+ }
+ }
+
return err;
}
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 9408c6e1a4..8d1f8a560b 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -1002,6 +1002,89 @@ cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq
return idpf_vc_txq_config_by_info(vport, txq_info, 1);
}
+int
+cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ queue_id = cpfl_vport->p2p_tx_complq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+ return err;
+}
+
+int
+cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+ return err;
+}
+
+int
+cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t logic_qid,
+ bool rx, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = rx ? VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;
+
+ if (type == VIRTCHNL2_QUEUE_TYPE_RX)
+ queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid, logic_qid);
+ else
+ queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_start_qid, logic_qid);
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+ if (err)
+ return err;
+
+ return err;
+}
+
+static int
+cpfl_alloc_split_p2p_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+ volatile struct virtchnl2_p2p_rx_buf_desc *rxd;
+ struct rte_mbuf *mbuf = NULL;
+ uint64_t dma_addr;
+ uint16_t i;
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ mbuf = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!mbuf)) {
+ PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+ return -ENOMEM;
+ }
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+ rxd = &((volatile struct virtchnl2_p2p_rx_buf_desc *)(rxq->rx_ring))[i];
+ rxd->reserve0 = 0;
+ rxd->pkt_addr = dma_addr;
+ }
+
+ rxq->nb_rx_hold = 0;
+ /* The value written in the RX buffer queue tail register, must be a multiple of 8.*/
+ rxq->rx_tail = rxq->nb_rx_desc - CPFL_HAIRPIN_Q_TAIL_AUX_VALUE;
+
+ return 0;
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
@@ -1055,22 +1138,31 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
IDPF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
} else {
/* Split queue */
- err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
- return err;
- }
- err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
- return err;
+ if (cpfl_rxq->hairpin_info.hairpin_q) {
+ err = cpfl_alloc_split_p2p_rxq_mbufs(rxq->bufq1);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate p2p RX buffer queue mbuf");
+ return err;
+ }
+ } else {
+ err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+ return err;
+ }
+ err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+ return err;
+ }
}
rte_wmb();
/* Init the RX tail register. */
IDPF_PCI_REG_WRITE(rxq->bufq1->qrx_tail, rxq->bufq1->rx_tail);
- IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
+ if (rxq->bufq2)
+ IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
}
return err;
@@ -1177,7 +1269,12 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return -EINVAL;
cpfl_rxq = dev->data->rx_queues[rx_queue_id];
- err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ rx_queue_id - cpfl_vport->nb_data_txq,
+ true, false);
+ else
+ err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
rx_queue_id);
@@ -1191,10 +1288,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
idpf_qc_single_rx_queue_reset(rxq);
} else {
rxq->bufq1->ops->release_mbufs(rxq->bufq1);
- rxq->bufq2->ops->release_mbufs(rxq->bufq2);
- idpf_qc_split_rx_queue_reset(rxq);
+ if (rxq->bufq2)
+ rxq->bufq2->ops->release_mbufs(rxq->bufq2);
+ if (cpfl_rxq->hairpin_info.hairpin_q) {
+ cpfl_rx_hairpin_descq_reset(rxq);
+ cpfl_rx_hairpin_bufq_reset(rxq->bufq1);
+ } else {
+ idpf_qc_split_rx_queue_reset(rxq);
+ }
}
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ if (!cpfl_rxq->hairpin_info.hairpin_q)
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1213,7 +1317,12 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
cpfl_txq = dev->data->tx_queues[tx_queue_id];
- err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
+ if (cpfl_txq->hairpin_info.hairpin_q)
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ tx_queue_id - cpfl_vport->nb_data_txq,
+ false, false);
+ else
+ err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
tx_queue_id);
@@ -1226,10 +1335,17 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
idpf_qc_single_tx_queue_reset(txq);
} else {
- idpf_qc_split_tx_descq_reset(txq);
- idpf_qc_split_tx_complq_reset(txq->complq);
+ if (cpfl_txq->hairpin_info.hairpin_q) {
+ cpfl_tx_hairpin_descq_reset(txq);
+ cpfl_tx_hairpin_complq_reset(txq->complq);
+ } else {
+ idpf_qc_split_tx_descq_reset(txq);
+ idpf_qc_split_tx_complq_reset(txq->complq);
+ }
}
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ if (!cpfl_txq->hairpin_info.hairpin_q)
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1249,10 +1365,22 @@ cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
void
cpfl_stop_queues(struct rte_eth_dev *dev)
{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
struct cpfl_rx_queue *cpfl_rxq;
struct cpfl_tx_queue *cpfl_txq;
int i;
+ if (cpfl_vport->p2p_tx_complq != NULL) {
+ if (cpfl_switch_hairpin_complq(cpfl_vport, false) != 0)
+ PMD_DRV_LOG(ERR, "Failed to stop hairpin Tx complq");
+ }
+
+ if (cpfl_vport->p2p_rx_bufq != NULL) {
+ if (cpfl_switch_hairpin_bufq(cpfl_vport, false) != 0)
+ PMD_DRV_LOG(ERR, "Failed to stop hairpin Rx bufq");
+ }
+
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
if (cpfl_rxq == NULL)
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 872ebc1bfd..aacd087b56 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -41,6 +41,17 @@
#define CPFL_RX_BUF_STRIDE 64
+/* The value written in the RX buffer queue tail register,
+ * and in WritePTR field in the TX completion queue context,
+ * must be a multiple of 8.
+ */
+#define CPFL_HAIRPIN_Q_TAIL_AUX_VALUE 8
+
+struct virtchnl2_p2p_rx_buf_desc {
+ __le64 reserve0;
+ __le64 pkt_addr; /* Packet buffer address */
+};
+
struct cpfl_rxq_hairpin_info {
bool hairpin_q; /* if rx queue is a hairpin queue */
uint16_t peer_txp;
@@ -102,4 +113,8 @@ int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq);
int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
+int cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t qid,
+ bool rx, bool on);
#endif /* _CPFL_RXTX_H_ */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v7 10/14] common/idpf: add irq map config API
2023-05-31 13:04 ` [PATCH v7 00/14] net/cpfl: add hairpin queue support beilei.xing
` (8 preceding siblings ...)
2023-05-31 13:04 ` [PATCH v7 09/14] net/cpfl: support hairpin queue start/stop beilei.xing
@ 2023-05-31 13:04 ` beilei.xing
2023-05-31 13:04 ` [PATCH v7 11/14] net/cpfl: enable write back based on ITR expire beilei.xing
` (4 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 13:04 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds the idpf_vport_irq_map_config_by_qids API.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_device.c | 75 ++++++++++++++++++++++++
drivers/common/idpf/idpf_common_device.h | 4 ++
drivers/common/idpf/version.map | 1 +
3 files changed, 80 insertions(+)
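A minimal caller-side sketch of the new API (illustrative only; the 64-entry
bound is an assumption for the example, and it simply mirrors the default data
queue IDs here -- the point of passing qids[] explicitly is that hairpin queues
live in a separate hardware queue ID chunk, as a later patch shows):

#include <errno.h>
#include <rte_common.h>
#include "idpf_common_device.h"

/* Map an explicit list of hardware Rx queue IDs to the vport's single
 * vector so descriptors are written back when the ITR timer expires.
 */
static int
irq_map_by_qids_sketch(struct idpf_vport *vport, uint16_t nb_rxq)
{
        uint32_t qids[64]; /* assumption: enough queues for this sketch */
        uint16_t i;

        if (nb_rxq > RTE_DIM(qids))
                return -EINVAL;

        for (i = 0; i < nb_rxq; i++)
                qids[i] = vport->chunks_info.rx_start_qid + i;

        return idpf_vport_irq_map_config_by_qids(vport, qids, nb_rxq);
}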
diff --git a/drivers/common/idpf/idpf_common_device.c b/drivers/common/idpf/idpf_common_device.c
index dc47551b17..cc4207a46e 100644
--- a/drivers/common/idpf/idpf_common_device.c
+++ b/drivers/common/idpf/idpf_common_device.c
@@ -667,6 +667,81 @@ idpf_vport_irq_map_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
return ret;
}
+int
+idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport, uint32_t *qids, uint16_t nb_rx_queues)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_queue_vector *qv_map;
+ struct idpf_hw *hw = &adapter->hw;
+ uint32_t dynctl_val, itrn_val;
+ uint32_t dynctl_reg_start;
+ uint32_t itrn_reg_start;
+ uint16_t i;
+ int ret;
+
+ qv_map = rte_zmalloc("qv_map",
+ nb_rx_queues *
+ sizeof(struct virtchnl2_queue_vector), 0);
+ if (qv_map == NULL) {
+ DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
+ nb_rx_queues);
+ ret = -ENOMEM;
+ goto qv_map_alloc_err;
+ }
+
+ /* Rx interrupt disabled, Map interrupt only for writeback */
+
+ /* The capability flags adapter->caps.other_caps should be
+ * compared with bit VIRTCHNL2_CAP_WB_ON_ITR here. The if
+ * condition should be updated when the FW can return the
+ * correct flag bits.
+ */
+ dynctl_reg_start =
+ vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
+ itrn_reg_start =
+ vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
+ dynctl_val = IDPF_READ_REG(hw, dynctl_reg_start);
+ DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x", dynctl_val);
+ itrn_val = IDPF_READ_REG(hw, itrn_reg_start);
+ DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
+ /* Force write-backs by setting WB_ON_ITR bit in DYN_CTL
+ * register. WB_ON_ITR and INTENA are mutually exclusive
+ * bits. Setting WB_ON_ITR bits means TX and RX Descs
+ * are written back based on ITR expiration irrespective
+ * of INTENA setting.
+ */
+ /* TBD: need to tune INTERVAL value for better performance. */
+ itrn_val = (itrn_val == 0) ? IDPF_DFLT_INTERVAL : itrn_val;
+ dynctl_val = VIRTCHNL2_ITR_IDX_0 <<
+ PF_GLINT_DYN_CTL_ITR_INDX_S |
+ PF_GLINT_DYN_CTL_WB_ON_ITR_M |
+ itrn_val << PF_GLINT_DYN_CTL_INTERVAL_S;
+ IDPF_WRITE_REG(hw, dynctl_reg_start, dynctl_val);
+
+ for (i = 0; i < nb_rx_queues; i++) {
+ /* map all queues to the same vector */
+ qv_map[i].queue_id = qids[i];
+ qv_map[i].vector_id =
+ vport->recv_vectors->vchunks.vchunks->start_vector_id;
+ }
+ vport->qv_map = qv_map;
+
+ ret = idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, true);
+ if (ret != 0) {
+ DRV_LOG(ERR, "config interrupt mapping failed");
+ goto config_irq_map_err;
+ }
+
+ return 0;
+
+config_irq_map_err:
+ rte_free(vport->qv_map);
+ vport->qv_map = NULL;
+
+qv_map_alloc_err:
+ return ret;
+}
+
int
idpf_vport_irq_unmap_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
{
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 112367dae8..f767ea7cec 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -200,5 +200,9 @@ int idpf_vport_info_init(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_info);
__rte_internal
void idpf_vport_stats_update(struct virtchnl2_vport_stats *oes, struct virtchnl2_vport_stats *nes);
+__rte_internal
+int idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport,
+ uint32_t *qids,
+ uint16_t nb_rx_queues);
#endif /* _IDPF_COMMON_DEVICE_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 25624732b0..0729f6b912 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -69,6 +69,7 @@ INTERNAL {
idpf_vport_info_init;
idpf_vport_init;
idpf_vport_irq_map_config;
+ idpf_vport_irq_map_config_by_qids;
idpf_vport_irq_unmap_config;
idpf_vport_rss_config;
idpf_vport_stats_update;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v7 11/14] net/cpfl: enable write back based on ITR expire
2023-05-31 13:04 ` [PATCH v7 00/14] net/cpfl: add hairpin queue support beilei.xing
` (9 preceding siblings ...)
2023-05-31 13:04 ` [PATCH v7 10/14] common/idpf: add irq map config API beilei.xing
@ 2023-05-31 13:04 ` beilei.xing
2023-05-31 13:04 ` [PATCH v7 12/14] net/cpfl: support peer ports get beilei.xing
` (3 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 13:04 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch enables write back based on ITR expire
(WB_ON_ITR) for hairpin queues.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 2b99e58341..850f1c0bc6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -735,11 +735,22 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
static int
cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
{
+ uint32_t qids[CPFL_MAX_P2P_NB_QUEUES + IDPF_DEFAULT_RXQ_NUM] = {0};
struct cpfl_vport *cpfl_vport = dev->data->dev_private;
struct idpf_vport *vport = &cpfl_vport->base;
uint16_t nb_rx_queues = dev->data->nb_rx_queues;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i;
- return idpf_vport_irq_map_config(vport, nb_rx_queues);
+ for (i = 0; i < nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ qids[i] = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid,
+ (i - cpfl_vport->nb_data_rxq));
+ else
+ qids[i] = cpfl_hw_qid_get(vport->chunks_info.rx_start_qid, i);
+ }
+ return idpf_vport_irq_map_config_by_qids(vport, qids, nb_rx_queues);
}
/* Update hairpin_info for dev's tx hairpin queue */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v7 12/14] net/cpfl: support peer ports get
2023-05-31 13:04 ` [PATCH v7 00/14] net/cpfl: add hairpin queue support beilei.xing
` (10 preceding siblings ...)
2023-05-31 13:04 ` [PATCH v7 11/14] net/cpfl: enable write back based on ITR expire beilei.xing
@ 2023-05-31 13:04 ` beilei.xing
2023-05-31 13:04 ` [PATCH v7 13/14] net/cpfl: support hairpin bind/unbind beilei.xing
` (2 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 13:04 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports getting hairpin peer ports.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 40 ++++++++++++++++++++++++++++++++++
1 file changed, 40 insertions(+)
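At the ethdev level this op backs rte_eth_hairpin_get_peer_ports(); a small
usage sketch (the array size and the printed messages are arbitrary):

#include <stdio.h>
#include <rte_ethdev.h>

/* List the Rx ports wired to this port's hairpin Tx queues. */
static void
show_hairpin_tx_peers_sketch(uint16_t port_id)
{
        uint16_t peers[RTE_MAX_ETHPORTS];
        int nb, i;

        /* direction > 0: the queried port acts as the Tx side */
        nb = rte_eth_hairpin_get_peer_ports(port_id, peers, RTE_DIM(peers), 1);
        if (nb < 0) {
                printf("port %u: no hairpin Tx peers (err %d)\n", port_id, nb);
                return;
        }
        for (i = 0; i < nb; i++)
                printf("port %u hairpin Tx peer: Rx port %u\n", port_id, peers[i]);
}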
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 850f1c0bc6..9fc7d3401f 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1080,6 +1080,45 @@ cpfl_dev_close(struct rte_eth_dev *dev)
return 0;
}
+static int
+cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
+ size_t len, uint32_t tx)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_tx_queue *txq;
+ struct idpf_rx_queue *rxq;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i, j;
+
+ if (len <= 0)
+ return -EINVAL;
+
+ if (cpfl_vport->p2p_q_chunks_info == NULL)
+ return -ENOTSUP;
+
+ if (tx > 0) {
+ for (i = cpfl_vport->nb_data_txq, j = 0; i < dev->data->nb_tx_queues; i++, j++) {
+ txq = dev->data->tx_queues[i];
+ if (txq == NULL)
+ return -EINVAL;
+ cpfl_txq = (struct cpfl_tx_queue *)txq;
+ peer_ports[j] = cpfl_txq->hairpin_info.peer_rxp;
+ }
+ } else if (tx == 0) {
+ for (i = cpfl_vport->nb_data_rxq, j = 0; i < dev->data->nb_rx_queues; i++, j++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq == NULL)
+ return -EINVAL;
+ cpfl_rxq = (struct cpfl_rx_queue *)rxq;
+ peer_ports[j] = cpfl_rxq->hairpin_info.peer_txp;
+ }
+ }
+
+ return j;
+}
+
static const struct eth_dev_ops cpfl_eth_dev_ops = {
.dev_configure = cpfl_dev_configure,
.dev_close = cpfl_dev_close,
@@ -1109,6 +1148,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.hairpin_cap_get = cpfl_hairpin_cap_get,
.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
+ .hairpin_get_peer_ports = cpfl_hairpin_get_peer_ports,
};
static int
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v7 13/14] net/cpfl: support hairpin bind/unbind
2023-05-31 13:04 ` [PATCH v7 00/14] net/cpfl: add hairpin queue support beilei.xing
` (11 preceding siblings ...)
2023-05-31 13:04 ` [PATCH v7 12/14] net/cpfl: support peer ports get beilei.xing
@ 2023-05-31 13:04 ` beilei.xing
2023-05-31 13:04 ` [PATCH v7 14/14] doc: update the doc of CPFL PMD beilei.xing
2023-06-05 6:17 ` [PATCH v8 00/14] net/cpfl: add hairpin queue support beilei.xing
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 13:04 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports hairpin_bind/unbind ops.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 137 +++++++++++++++++++++++++++++++++
1 file changed, 137 insertions(+)
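These ops back rte_eth_hairpin_bind()/rte_eth_hairpin_unbind(), which an
application calls once the ports with manual-bind hairpin queues are started.
A sketch (with this PMD only single-port hairpin is supported, so tx_port and
rx_port would be the same port):

#include <rte_ethdev.h>

/* Bind the Tx port's hairpin queues to the Rx port's, run traffic, unbind. */
static int
hairpin_manual_bind_sketch(uint16_t tx_port, uint16_t rx_port)
{
        int ret;

        ret = rte_eth_hairpin_bind(tx_port, rx_port);
        if (ret != 0)
                return ret;

        /* traffic now flows through the hairpin path until teardown */

        return rte_eth_hairpin_unbind(tx_port, rx_port);
}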
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 9fc7d3401f..ff36f02b11 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1119,6 +1119,141 @@ cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
return j;
}
+static int
+cpfl_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct idpf_vport *tx_vport = &cpfl_tx_vport->base;
+ struct cpfl_vport *cpfl_rx_vport;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct rte_eth_dev *peer_dev;
+ struct idpf_vport *rx_vport;
+ int err = 0;
+ int i;
+
+ err = cpfl_txq_hairpin_info_update(dev, rx_port);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info.");
+ return err;
+ }
+
+ /* configure hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ err = cpfl_hairpin_txq_config(tx_vport, cpfl_txq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+ return err;
+ }
+ }
+
+ err = cpfl_hairpin_tx_complq_config(cpfl_tx_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
+ return err;
+ }
+
+ peer_dev = &rte_eth_devices[rx_port];
+ cpfl_rx_vport = (struct cpfl_vport *)peer_dev->data->dev_private;
+ rx_vport = &cpfl_rx_vport->base;
+ cpfl_rxq_hairpin_mz_bind(peer_dev);
+
+ err = cpfl_hairpin_rx_bufq_config(cpfl_rx_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
+ return err;
+ }
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ err = cpfl_hairpin_rxq_config(rx_vport, cpfl_rxq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
+ return err;
+ }
+ err = cpfl_rx_queue_init(peer_dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+ return err;
+ }
+ }
+
+ /* enable hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+ i - cpfl_tx_vport->nb_data_txq,
+ false, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on",
+ i);
+ return err;
+ }
+ cpfl_txq->base.q_started = true;
+ }
+
+ err = cpfl_switch_hairpin_complq(cpfl_tx_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq");
+ return err;
+ }
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+ i - cpfl_rx_vport->nb_data_rxq,
+ true, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on",
+ i);
+ }
+ cpfl_rxq->base.q_started = true;
+ }
+
+ err = cpfl_switch_hairpin_bufq(cpfl_rx_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Rx buffer queue");
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+cpfl_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+ struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i;
+
+ /* disable hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+ i - cpfl_tx_vport->nb_data_txq,
+ false, false);
+ cpfl_txq->base.q_started = false;
+ }
+
+ cpfl_switch_hairpin_complq(cpfl_tx_vport, false);
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+ i - cpfl_rx_vport->nb_data_rxq,
+ true, false);
+ cpfl_rxq->base.q_started = false;
+ }
+
+ cpfl_switch_hairpin_bufq(cpfl_rx_vport, false);
+
+ return 0;
+}
+
static const struct eth_dev_ops cpfl_eth_dev_ops = {
.dev_configure = cpfl_dev_configure,
.dev_close = cpfl_dev_close,
@@ -1149,6 +1284,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
.hairpin_get_peer_ports = cpfl_hairpin_get_peer_ports,
+ .hairpin_bind = cpfl_hairpin_bind,
+ .hairpin_unbind = cpfl_hairpin_unbind,
};
static int
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v7 14/14] doc: update the doc of CPFL PMD
2023-05-31 13:04 ` [PATCH v7 00/14] net/cpfl: add hairpin queue support beilei.xing
` (12 preceding siblings ...)
2023-05-31 13:04 ` [PATCH v7 13/14] net/cpfl: support hairpin bind/unbind beilei.xing
@ 2023-05-31 13:04 ` beilei.xing
2023-06-05 6:17 ` [PATCH v8 00/14] net/cpfl: add hairpin queue support beilei.xing
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-05-31 13:04 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
Update cpfl.rst to clarify hairpin support.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
doc/guides/nics/cpfl.rst | 7 +++++++
1 file changed, 7 insertions(+)
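To make the new section concrete, a sketch of single-port hairpin setup at the
application level (the queue indices, the 512-descriptor ring size and the
assumption that rte_eth_dev_configure() reserved one extra queue per direction
are all illustrative):

#include <rte_ethdev.h>

/* One hairpin Rx queue and one hairpin Tx queue looping packets back on the
 * same port.  Assumes the port was configured with nb_rxq + 1 Rx queues and
 * nb_txq + 1 Tx queues, the extra ones being the hairpin queues.
 */
static int
single_port_hairpin_setup_sketch(uint16_t port, uint16_t nb_rxq, uint16_t nb_txq)
{
        struct rte_eth_hairpin_conf conf = { .peer_count = 1 };
        int ret;

        /* Hairpin Rx queue nb_rxq is fed by hairpin Tx queue nb_txq. */
        conf.peers[0].port = port;
        conf.peers[0].queue = nb_txq;
        ret = rte_eth_rx_hairpin_queue_setup(port, nb_rxq, 512, &conf);
        if (ret != 0)
                return ret;

        /* Hairpin Tx queue nb_txq sends to hairpin Rx queue nb_rxq. */
        conf.peers[0].queue = nb_rxq;
        ret = rte_eth_tx_hairpin_queue_setup(port, nb_txq, 512, &conf);
        if (ret != 0)
                return ret;

        return rte_eth_dev_start(port);
}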
diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index d25db088eb..8d5c3082e4 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -106,3 +106,10 @@ The paths are chosen based on 2 conditions:
A value "P" means the offload feature is not supported by vector path.
If any not supported features are used, cpfl vector PMD is disabled
and the scalar paths are chosen.
+
+Hairpin queue
+~~~~~~~~~~~~~
+
E2100 Series can loop back packets from RX port to TX port; this feature is
called port-to-port or hairpin.
Currently, the PMD only supports single port hairpin.
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v8 00/14] net/cpfl: add hairpin queue support
2023-05-31 13:04 ` [PATCH v7 00/14] net/cpfl: add hairpin queue support beilei.xing
` (13 preceding siblings ...)
2023-05-31 13:04 ` [PATCH v7 14/14] doc: update the doc of CPFL PMD beilei.xing
@ 2023-06-05 6:17 ` beilei.xing
2023-06-05 6:17 ` [PATCH v8 01/14] net/cpfl: refine structures beilei.xing
` (14 more replies)
14 siblings, 15 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 6:17 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patchset adds hairpin queue support.
v2 changes:
- change hairpin rx queues configuration sequence.
- code refine.
v3 changes:
- Refine the patchset based on the latest code.
v4 change:
- Remove hairpin rx buffer queue's sw_ring.
- Change hairpin rx queues configuration sequence in cpfl_hairpin_bind function.
- Refine hairpin queue setup and release.
v5 change:
- Fix memory leak during queue setup.
- Refine hairpin Rxq/Txq start/stop.
v6 change:
- Add sign-off.
v7 change:
- Update cpfl.rst
v8 change:
- Fix Intel-compilation failure.
Beilei Xing (14):
net/cpfl: refine structures
common/idpf: support queue groups add/delete
net/cpfl: add hairpin queue group during vport init
net/cpfl: support hairpin queue capability get
net/cpfl: support hairpin queue setup and release
common/idpf: add queue config API
net/cpfl: support hairpin queue configuration
common/idpf: add switch queue API
net/cpfl: support hairpin queue start/stop
common/idpf: add irq map config API
net/cpfl: enable write back based on ITR expire
net/cpfl: support peer ports get
net/cpfl: support hairpin bind/unbind
doc: update the doc of CPFL PMD
doc/guides/nics/cpfl.rst | 7 +
drivers/common/idpf/idpf_common_device.c | 75 ++
drivers/common/idpf/idpf_common_device.h | 4 +
drivers/common/idpf/idpf_common_virtchnl.c | 138 +++-
drivers/common/idpf/idpf_common_virtchnl.h | 18 +
drivers/common/idpf/version.map | 6 +
drivers/net/cpfl/cpfl_ethdev.c | 611 ++++++++++++++--
drivers/net/cpfl/cpfl_ethdev.h | 35 +-
drivers/net/cpfl/cpfl_rxtx.c | 781 +++++++++++++++++++--
drivers/net/cpfl/cpfl_rxtx.h | 76 ++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 21 +-
11 files changed, 1653 insertions(+), 119 deletions(-)
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v8 01/14] net/cpfl: refine structures
2023-06-05 6:17 ` [PATCH v8 00/14] net/cpfl: add hairpin queue support beilei.xing
@ 2023-06-05 6:17 ` beilei.xing
2023-06-05 6:17 ` [PATCH v8 02/14] common/idpf: support queue groups add/delete beilei.xing
` (13 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 6:17 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch refines some structures to support hairpin queues:
cpfl_rx_queue/cpfl_tx_queue/cpfl_vport.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 85 +++++++-----
drivers/net/cpfl/cpfl_ethdev.h | 6 +-
drivers/net/cpfl/cpfl_rxtx.c | 175 +++++++++++++++++-------
drivers/net/cpfl/cpfl_rxtx.h | 8 ++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 17 +--
5 files changed, 196 insertions(+), 95 deletions(-)
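The pattern used throughout the diff is plain structure embedding: the common
idpf queue (or vport) stays as the first member named base, so the one pointer
stored in dev->data serves both the cpfl layer and the common module. A
stripped-down illustration (the example_* names are made up for the sketch):

#include <rte_ethdev.h>
#include "idpf_common_rxtx.h"

/* cpfl-level wrapper: common state in 'base', room for cpfl-only fields. */
struct example_rx_queue {
        struct idpf_rx_queue base;      /* consumed by drivers/common/idpf */
        /* hairpin-specific fields are added here by later patches */
};

static void
example_release(struct rte_eth_dev *dev, uint16_t qid)
{
        struct example_rx_queue *q = dev->data->rx_queues[qid];

        if (q == NULL)
                return;
        /* The common helper keeps operating on the embedded base only. */
        idpf_qc_rx_queue_release(&q->base);
}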
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 7528a14d05..e587155db6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -124,7 +124,8 @@ static int
cpfl_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct rte_eth_link new_link;
unsigned int i;
@@ -156,7 +157,8 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
static int
cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
dev_info->max_rx_queues = base->caps.max_rx_q;
@@ -216,7 +218,8 @@ cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
static int
cpfl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
/* mtu setting is forbidden if port is start */
if (dev->data->dev_started) {
@@ -256,12 +259,12 @@ static uint64_t
cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
{
uint64_t mbuf_alloc_failed = 0;
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i = 0;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- mbuf_alloc_failed += __atomic_load_n(&rxq->rx_stats.mbuf_alloc_failed,
+ cpfl_rxq = dev->data->rx_queues[i];
+ mbuf_alloc_failed += __atomic_load_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,
__ATOMIC_RELAXED);
}
@@ -271,8 +274,8 @@ cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
static int
cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
int ret;
@@ -305,20 +308,20 @@ cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
static void
cpfl_reset_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- __atomic_store_n(&rxq->rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
+ cpfl_rxq = dev->data->rx_queues[i];
+ __atomic_store_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
}
}
static int
cpfl_dev_stats_reset(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
int ret;
@@ -343,8 +346,8 @@ static int cpfl_dev_xstats_reset(struct rte_eth_dev *dev)
static int cpfl_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned int n)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
unsigned int i;
int ret;
@@ -459,7 +462,8 @@ cpfl_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t idx, shift;
int ret = 0;
@@ -498,7 +502,8 @@ cpfl_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t idx, shift;
int ret = 0;
@@ -536,7 +541,8 @@ static int
cpfl_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
int ret = 0;
@@ -601,7 +607,8 @@ static int
cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
int ret = 0;
@@ -638,7 +645,8 @@ cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,
static int
cpfl_dev_configure(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct rte_eth_conf *conf = &dev->data->dev_conf;
struct idpf_adapter *base = vport->adapter;
int ret;
@@ -710,7 +718,8 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
static int
cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
uint16_t nb_rx_queues = dev->data->nb_rx_queues;
return idpf_vport_irq_map_config(vport, nb_rx_queues);
@@ -719,14 +728,14 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
static int
cpfl_start_queues(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
- struct idpf_tx_queue *txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
int err = 0;
int i;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL || txq->tx_deferred_start)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
continue;
err = cpfl_tx_queue_start(dev, i);
if (err != 0) {
@@ -736,8 +745,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- if (rxq == NULL || rxq->rx_deferred_start)
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
continue;
err = cpfl_rx_queue_start(dev, i);
if (err != 0) {
@@ -752,7 +761,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)
static int
cpfl_dev_start(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(base);
uint16_t num_allocated_vectors = base->caps.num_allocated_vectors;
@@ -813,7 +823,8 @@ cpfl_dev_start(struct rte_eth_dev *dev)
static int
cpfl_dev_stop(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
if (dev->data->dev_started == 0)
return 0;
@@ -832,7 +843,8 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
static int
cpfl_dev_close(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
@@ -842,7 +854,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
adapter->cur_vport_nb--;
dev->data->dev_private = NULL;
adapter->vports[vport->sw_idx] = NULL;
- rte_free(vport);
+ rte_free(cpfl_vport);
return 0;
}
@@ -1047,7 +1059,7 @@ cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id)
int i;
for (i = 0; i < adapter->cur_vport_nb; i++) {
- vport = adapter->vports[i];
+ vport = &adapter->vports[i]->base;
if (vport->vport_id != vport_id)
continue;
else
@@ -1275,7 +1287,8 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter)
static int
cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_vport_param *param = init_params;
struct cpfl_adapter_ext *adapter = param->adapter;
/* for sending create vport virtchnl msg prepare */
@@ -1300,7 +1313,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
goto err;
}
- adapter->vports[param->idx] = vport;
+ adapter->vports[param->idx] = cpfl_vport;
adapter->cur_vports |= RTE_BIT32(param->devarg_id);
adapter->cur_vport_nb++;
@@ -1415,7 +1428,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
snprintf(name, sizeof(name), "cpfl_%s_vport_0",
pci_dev->device.name);
retval = rte_eth_dev_create(&pci_dev->device, name,
- sizeof(struct idpf_vport),
+ sizeof(struct cpfl_vport),
NULL, NULL, cpfl_dev_vport_init,
&vport_param);
if (retval != 0)
@@ -1433,7 +1446,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
pci_dev->device.name,
devargs.req_vports[i]);
retval = rte_eth_dev_create(&pci_dev->device, name,
- sizeof(struct idpf_vport),
+ sizeof(struct cpfl_vport),
NULL, NULL, cpfl_dev_vport_init,
&vport_param);
if (retval != 0)
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 200dfcac02..81fe9ac4c3 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -69,13 +69,17 @@ struct cpfl_devargs {
uint16_t req_vport_nb;
};
+struct cpfl_vport {
+ struct idpf_vport base;
+};
+
struct cpfl_adapter_ext {
TAILQ_ENTRY(cpfl_adapter_ext) next;
struct idpf_adapter base;
char name[CPFL_ADAPTER_NAME_LEN];
- struct idpf_vport **vports;
+ struct cpfl_vport **vports;
uint16_t max_vport_nb;
uint16_t cur_vports; /* bit mask of created vport */
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 75021c3c54..04a51b8d15 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -128,7 +128,8 @@ cpfl_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,
uint16_t nb_desc, unsigned int socket_id,
struct rte_mempool *mp, uint8_t bufq_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct idpf_hw *hw = &base->hw;
const struct rte_memzone *mz;
@@ -220,15 +221,69 @@ cpfl_rx_split_bufq_release(struct idpf_rx_queue *bufq)
rte_free(bufq);
}
+static void
+cpfl_rx_queue_release(void *rxq)
+{
+ struct cpfl_rx_queue *cpfl_rxq = rxq;
+ struct idpf_rx_queue *q = NULL;
+
+ if (cpfl_rxq == NULL)
+ return;
+
+ q = &cpfl_rxq->base;
+
+ /* Split queue */
+ if (!q->adapter->is_rx_singleq) {
+ if (q->bufq2)
+ cpfl_rx_split_bufq_release(q->bufq2);
+
+ if (q->bufq1)
+ cpfl_rx_split_bufq_release(q->bufq1);
+
+ rte_free(cpfl_rxq);
+ return;
+ }
+
+ /* Single queue */
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(cpfl_rxq);
+}
+
+static void
+cpfl_tx_queue_release(void *txq)
+{
+ struct cpfl_tx_queue *cpfl_txq = txq;
+ struct idpf_tx_queue *q = NULL;
+
+ if (cpfl_txq == NULL)
+ return;
+
+ q = &cpfl_txq->base;
+
+ if (q->complq) {
+ rte_memzone_free(q->complq->mz);
+ rte_free(q->complq);
+ }
+
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(cpfl_txq);
+}
+
int
cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct idpf_hw *hw = &base->hw;
+ struct cpfl_rx_queue *cpfl_rxq;
const struct rte_memzone *mz;
struct idpf_rx_queue *rxq;
uint16_t rx_free_thresh;
@@ -248,21 +303,23 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Free memory if needed */
if (dev->data->rx_queues[queue_idx] != NULL) {
- idpf_qc_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
dev->data->rx_queues[queue_idx] = NULL;
}
/* Setup Rx queue */
- rxq = rte_zmalloc_socket("cpfl rxq",
- sizeof(struct idpf_rx_queue),
+ cpfl_rxq = rte_zmalloc_socket("cpfl rxq",
+ sizeof(struct cpfl_rx_queue),
RTE_CACHE_LINE_SIZE,
socket_id);
- if (rxq == NULL) {
+ if (cpfl_rxq == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
ret = -ENOMEM;
goto err_rxq_alloc;
}
+ rxq = &cpfl_rxq->base;
+
is_splitq = !!(vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
rxq->mp = mp;
@@ -329,7 +386,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
rxq->q_set = true;
- dev->data->rx_queues[queue_idx] = rxq;
+ dev->data->rx_queues[queue_idx] = cpfl_rxq;
return 0;
@@ -349,7 +406,8 @@ cpfl_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq,
uint16_t queue_idx, uint16_t nb_desc,
unsigned int socket_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
const struct rte_memzone *mz;
struct idpf_tx_queue *cq;
int ret;
@@ -397,9 +455,11 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t tx_rs_thresh, tx_free_thresh;
+ struct cpfl_tx_queue *cpfl_txq;
struct idpf_hw *hw = &base->hw;
const struct rte_memzone *mz;
struct idpf_tx_queue *txq;
@@ -419,21 +479,23 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Free memory if needed. */
if (dev->data->tx_queues[queue_idx] != NULL) {
- idpf_qc_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
dev->data->tx_queues[queue_idx] = NULL;
}
/* Allocate the TX queue data structure. */
- txq = rte_zmalloc_socket("cpfl txq",
- sizeof(struct idpf_tx_queue),
+ cpfl_txq = rte_zmalloc_socket("cpfl txq",
+ sizeof(struct cpfl_tx_queue),
RTE_CACHE_LINE_SIZE,
socket_id);
- if (txq == NULL) {
+ if (cpfl_txq == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
ret = -ENOMEM;
goto err_txq_alloc;
}
+ txq = &cpfl_txq->base;
+
is_splitq = !!(vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
txq->nb_tx_desc = nb_desc;
@@ -487,7 +549,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
queue_idx * vport->chunks_info.tx_qtail_spacing);
txq->ops = &def_txq_ops;
txq->q_set = true;
- dev->data->tx_queues[queue_idx] = txq;
+ dev->data->tx_queues[queue_idx] = cpfl_txq;
return 0;
@@ -503,6 +565,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
+ struct cpfl_rx_queue *cpfl_rxq;
struct idpf_rx_queue *rxq;
uint16_t max_pkt_len;
uint32_t frame_size;
@@ -511,7 +574,8 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
if (rx_queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
- rxq = dev->data->rx_queues[rx_queue_id];
+ cpfl_rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = &cpfl_rxq->base;
if (rxq == NULL || !rxq->q_set) {
PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
@@ -575,9 +639,10 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_rx_queue *rxq =
- dev->data->rx_queues[rx_queue_id];
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq = dev->data->rx_queues[rx_queue_id];
+ struct idpf_rx_queue *rxq = &cpfl_rxq->base;
int err = 0;
err = idpf_vc_rxq_config(vport, rxq);
@@ -610,15 +675,15 @@ cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
if (tx_queue_id >= dev->data->nb_tx_queues)
return -EINVAL;
- txq = dev->data->tx_queues[tx_queue_id];
+ cpfl_txq = dev->data->tx_queues[tx_queue_id];
/* Init the RX tail register. */
- IDPF_PCI_REG_WRITE(txq->qtx_tail, 0);
+ IDPF_PCI_REG_WRITE(cpfl_txq->base.qtx_tail, 0);
return 0;
}
@@ -626,12 +691,13 @@ cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
int
cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_tx_queue *txq =
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_tx_queue *cpfl_txq =
dev->data->tx_queues[tx_queue_id];
int err = 0;
- err = idpf_vc_txq_config(vport, txq);
+ err = idpf_vc_txq_config(vport, &cpfl_txq->base);
if (err != 0) {
PMD_DRV_LOG(ERR, "Fail to configure Tx queue %u", tx_queue_id);
return err;
@@ -650,7 +716,7 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
tx_queue_id);
} else {
- txq->q_started = true;
+ cpfl_txq->base.q_started = true;
dev->data->tx_queue_state[tx_queue_id] =
RTE_ETH_QUEUE_STATE_STARTED;
}
@@ -661,13 +727,16 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
int
cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq;
struct idpf_rx_queue *rxq;
int err;
if (rx_queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
+ cpfl_rxq = dev->data->rx_queues[rx_queue_id];
err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
@@ -675,7 +744,7 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return err;
}
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = &cpfl_rxq->base;
rxq->q_started = false;
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
rxq->ops->release_mbufs(rxq);
@@ -693,13 +762,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_tx_queue *cpfl_txq;
struct idpf_tx_queue *txq;
int err;
if (tx_queue_id >= dev->data->nb_tx_queues)
return -EINVAL;
+ cpfl_txq = dev->data->tx_queues[tx_queue_id];
+
err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
@@ -707,7 +780,7 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
return err;
}
- txq = dev->data->tx_queues[tx_queue_id];
+ txq = &cpfl_txq->base;
txq->q_started = false;
txq->ops->release_mbufs(txq);
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
@@ -724,25 +797,25 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
void
cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- idpf_qc_rx_queue_release(dev->data->rx_queues[qid]);
+ cpfl_rx_queue_release(dev->data->rx_queues[qid]);
}
void
cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- idpf_qc_tx_queue_release(dev->data->tx_queues[qid]);
+ cpfl_tx_queue_release(dev->data->tx_queues[qid]);
}
void
cpfl_stop_queues(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
- struct idpf_tx_queue *txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
int i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- if (rxq == NULL)
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq == NULL)
continue;
if (cpfl_rx_queue_stop(dev, i) != 0)
@@ -750,8 +823,8 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL)
continue;
if (cpfl_tx_queue_stop(dev, i) != 0)
@@ -762,9 +835,10 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
void
cpfl_set_rx_function(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
#ifdef RTE_ARCH_X86
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i;
if (cpfl_rx_vec_dev_check_default(dev) == CPFL_VECTOR_PATH &&
@@ -790,8 +864,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- (void)idpf_qc_splitq_rx_vec_setup(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ (void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
if (vport->rx_use_avx512) {
@@ -810,8 +884,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
} else {
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- (void)idpf_qc_singleq_rx_vec_setup(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ (void)idpf_qc_singleq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
if (vport->rx_use_avx512) {
@@ -860,10 +934,11 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
void
cpfl_set_tx_function(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
#ifdef RTE_ARCH_X86
#ifdef CC_AVX512_SUPPORT
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
int i;
#endif /* CC_AVX512_SUPPORT */
@@ -878,8 +953,8 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
vport->tx_use_avx512 = true;
if (vport->tx_use_avx512) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- idpf_qc_tx_vec_avx512_setup(txq);
+ cpfl_txq = dev->data->tx_queues[i];
+ idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
}
}
}
@@ -916,10 +991,10 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
#ifdef CC_AVX512_SUPPORT
if (vport->tx_use_avx512) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL)
continue;
- idpf_qc_tx_vec_avx512_setup(txq);
+ idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
}
PMD_DRV_LOG(NOTICE,
"Using Single AVX512 Vector Tx (port %d).",
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index fb267d38c8..bfb9ad97bd 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -23,6 +23,14 @@
#define CPFL_SUPPORT_CHAIN_NUM 5
+struct cpfl_rx_queue {
+ struct idpf_rx_queue base;
+};
+
+struct cpfl_tx_queue {
+ struct idpf_tx_queue base;
+};
+
int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
index 665418d27d..5690b17911 100644
--- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
+++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
@@ -76,15 +76,16 @@ cpfl_rx_splitq_vec_default(struct idpf_rx_queue *rxq)
static inline int
cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_rx_queue *rxq;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq;
int i, default_ret, splitq_ret, ret = CPFL_SCALAR_PATH;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- default_ret = cpfl_rx_vec_queue_default(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
- splitq_ret = cpfl_rx_splitq_vec_default(rxq);
+ splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base);
ret = splitq_ret && default_ret;
} else {
ret = default_ret;
@@ -100,12 +101,12 @@ static inline int
cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)
{
int i;
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
int ret = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- ret = cpfl_tx_vec_queue_default(txq);
+ cpfl_txq = dev->data->tx_queues[i];
+ ret = cpfl_tx_vec_queue_default(&cpfl_txq->base);
if (ret == CPFL_SCALAR_PATH)
return CPFL_SCALAR_PATH;
}
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v8 02/14] common/idpf: support queue groups add/delete
2023-06-05 6:17 ` [PATCH v8 00/14] net/cpfl: add hairpin queue support beilei.xing
2023-06-05 6:17 ` [PATCH v8 01/14] net/cpfl: refine structures beilei.xing
@ 2023-06-05 6:17 ` beilei.xing
2023-06-05 6:17 ` [PATCH v8 03/14] net/cpfl: add hairpin queue group during vport init beilei.xing
` (12 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 6:17 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds queue group add/delete virtual channel support.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 66 ++++++++++++++++++++++
drivers/common/idpf/idpf_common_virtchnl.h | 9 +++
drivers/common/idpf/version.map | 2 +
3 files changed, 77 insertions(+)
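A caller-side sketch of the delete path (the group ID value is a placeholder;
the cpfl driver defines its own P2P group ID and issues exactly this call in a
later patch of the series):

#include <string.h>
#include "idpf_common_virtchnl.h"

/* Tear down a single P2P queue group on the vport. */
static int
del_p2p_queue_group_sketch(struct idpf_vport *vport)
{
        struct virtchnl2_queue_group_id qg_id;

        memset(&qg_id, 0, sizeof(qg_id));
        qg_id.queue_group_id = 1;       /* placeholder group ID */
        qg_id.queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;

        return idpf_vc_queue_grps_del(vport, 1, &qg_id);
}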
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index b713678634..a3fe55c897 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -359,6 +359,72 @@ idpf_vc_vport_destroy(struct idpf_vport *vport)
return err;
}
+int
+idpf_vc_queue_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
+ uint8_t *p2p_queue_grps_out)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_cmd_info args;
+ int size, qg_info_size;
+ int err = -1;
+
+ size = sizeof(*p2p_queue_grps_info) +
+ (p2p_queue_grps_info->qg_info.num_queue_groups - 1) *
+ sizeof(struct virtchnl2_queue_group_info);
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_ADD_QUEUE_GROUPS;
+ args.in_args = (uint8_t *)p2p_queue_grps_info;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ if (err != 0) {
+ DRV_LOG(ERR,
+ "Failed to execute command of VIRTCHNL2_OP_ADD_QUEUE_GROUPS");
+ return err;
+ }
+
+ rte_memcpy(p2p_queue_grps_out, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
+ return 0;
+}
+
+int idpf_vc_queue_grps_del(struct idpf_vport *vport,
+ uint16_t num_q_grps,
+ struct virtchnl2_queue_group_id *qg_ids)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_delete_queue_groups *vc_del_q_grps;
+ struct idpf_cmd_info args;
+ int size;
+ int err;
+
+ size = sizeof(*vc_del_q_grps) +
+ (num_q_grps - 1) * sizeof(struct virtchnl2_queue_group_id);
+ vc_del_q_grps = rte_zmalloc("vc_del_q_grps", size, 0);
+
+ vc_del_q_grps->vport_id = vport->vport_id;
+ vc_del_q_grps->num_queue_groups = num_q_grps;
+ memcpy(vc_del_q_grps->qg_ids, qg_ids,
+ num_q_grps * sizeof(struct virtchnl2_queue_group_id));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_DEL_QUEUE_GROUPS;
+ args.in_args = (uint8_t *)vc_del_q_grps;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_DEL_QUEUE_GROUPS");
+
+ rte_free(vc_del_q_grps);
+ return err;
+}
+
int
idpf_vc_rss_key_set(struct idpf_vport *vport)
{
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index c45295290e..58b16e1c5d 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -64,4 +64,13 @@ int idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
__rte_internal
int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 *buff_count, struct idpf_dma_mem **buffs);
+__rte_internal
+int idpf_vc_queue_grps_del(struct idpf_vport *vport,
+ uint16_t num_q_grps,
+ struct virtchnl2_queue_group_id *qg_ids);
+__rte_internal
+int
+idpf_vc_queue_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *ptp_queue_grps_info,
+ uint8_t *ptp_queue_grps_out);
#endif /* _IDPF_COMMON_VIRTCHNL_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 70334a1b03..01d18f3f3f 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -43,6 +43,8 @@ INTERNAL {
idpf_vc_irq_map_unmap_config;
idpf_vc_one_msg_read;
idpf_vc_ptype_info_query;
+ idpf_vc_queue_grps_add;
+ idpf_vc_queue_grps_del;
idpf_vc_queue_switch;
idpf_vc_queues_ena_dis;
idpf_vc_rss_hash_get;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v8 03/14] net/cpfl: add hairpin queue group during vport init
2023-06-05 6:17 ` [PATCH v8 00/14] net/cpfl: add hairpin queue support beilei.xing
2023-06-05 6:17 ` [PATCH v8 01/14] net/cpfl: refine structures beilei.xing
2023-06-05 6:17 ` [PATCH v8 02/14] common/idpf: support queue groups add/delete beilei.xing
@ 2023-06-05 6:17 ` beilei.xing
2023-06-05 8:35 ` Wu, Jingjing
2023-06-05 6:17 ` [PATCH v8 04/14] net/cpfl: support hairpin queue capability get beilei.xing
` (11 subsequent siblings)
14 siblings, 1 reply; 164+ messages in thread
From: beilei.xing @ 2023-06-05 6:17 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds a hairpin queue group during vport init.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 133 +++++++++++++++++++++++++++++++++
drivers/net/cpfl/cpfl_ethdev.h | 18 +++++
drivers/net/cpfl/cpfl_rxtx.h | 7 ++
3 files changed, 158 insertions(+)
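Reading aid (not part of this patch): the chunk info saved here is consumed later in the series to address the hairpin queues. The two illustrative helpers below show the arithmetic involved; the real helpers land together with the hairpin queue setup code.

#include <stdint.h>

/* Absolute queue id of the logic_qid-th hairpin queue. */
static inline uint16_t
example_hw_qid(uint16_t start_qid, uint16_t logic_qid)
{
	return start_qid + logic_qid;
}

/* Tail register offset of the logic_qid-th hairpin queue. */
static inline uint64_t
example_hw_qtail(uint64_t tail_start, uint16_t logic_qid, uint64_t spacing)
{
	return tail_start + (uint64_t)logic_qid * spacing;
}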
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index e587155db6..c1273a7478 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -840,6 +840,20 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
return 0;
}
+static int
+cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
+{
+ struct virtchnl2_queue_group_id qg_ids[CPFL_P2P_NB_QUEUE_GRPS] = {0};
+ int ret = 0;
+
+ qg_ids[0].queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
+ qg_ids[0].queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
+ ret = idpf_vc_queue_grps_del(vport, CPFL_P2P_NB_QUEUE_GRPS, qg_ids);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to delete p2p queue groups");
+ return ret;
+}
+
static int
cpfl_dev_close(struct rte_eth_dev *dev)
{
@@ -848,7 +862,12 @@ cpfl_dev_close(struct rte_eth_dev *dev)
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
+
+ if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
+ cpfl_p2p_queue_grps_del(vport);
+
idpf_vport_deinit(vport);
+ rte_free(cpfl_vport->p2p_q_chunks_info);
adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
adapter->cur_vport_nb--;
@@ -1284,6 +1303,96 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter)
return vport_idx;
}
+static int
+cpfl_p2p_q_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
+ uint8_t *p2p_q_vc_out_info)
+{
+ int ret;
+
+ p2p_queue_grps_info->vport_id = vport->vport_id;
+ p2p_queue_grps_info->qg_info.num_queue_groups = CPFL_P2P_NB_QUEUE_GRPS;
+ p2p_queue_grps_info->qg_info.groups[0].num_rx_q = CPFL_MAX_P2P_NB_QUEUES;
+ p2p_queue_grps_info->qg_info.groups[0].num_rx_bufq = CPFL_P2P_NB_RX_BUFQ;
+ p2p_queue_grps_info->qg_info.groups[0].num_tx_q = CPFL_MAX_P2P_NB_QUEUES;
+ p2p_queue_grps_info->qg_info.groups[0].num_tx_complq = CPFL_P2P_NB_TX_COMPLQ;
+ p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
+ p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
+ p2p_queue_grps_info->qg_info.groups[0].rx_q_grp_info.rss_lut_size = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.tx_tc = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.priority = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.is_sp = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.pir_weight = 0;
+
+ ret = idpf_vc_queue_grps_add(vport, p2p_queue_grps_info, p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to add p2p queue groups.");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
+ struct virtchnl2_add_queue_groups *p2p_q_vc_out_info)
+{
+ struct p2p_queue_chunks_info *p2p_q_chunks_info = cpfl_vport->p2p_q_chunks_info;
+ struct virtchnl2_queue_reg_chunks *vc_chunks_out;
+ int i, type;
+
+ if (p2p_q_vc_out_info->qg_info.groups[0].qg_id.queue_group_type !=
+ VIRTCHNL2_QUEUE_GROUP_P2P) {
+ PMD_DRV_LOG(ERR, "Add queue group response mismatch.");
+ return -EINVAL;
+ }
+
+ vc_chunks_out = &p2p_q_vc_out_info->qg_info.groups[0].chunks;
+
+ for (i = 0; i < vc_chunks_out->num_chunks; i++) {
+ type = vc_chunks_out->chunks[i].type;
+ switch (type) {
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ p2p_q_chunks_info->tx_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->tx_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->tx_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ p2p_q_chunks_info->rx_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->rx_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->rx_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ p2p_q_chunks_info->tx_compl_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->tx_compl_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->tx_compl_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ p2p_q_chunks_info->rx_buf_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->rx_buf_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->rx_buf_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported queue type");
+ break;
+ }
+ }
+
+ return 0;
+}
+
static int
cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
{
@@ -1293,6 +1402,8 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
struct cpfl_adapter_ext *adapter = param->adapter;
/* for sending create vport virtchnl msg prepare */
struct virtchnl2_create_vport create_vport_info;
+ struct virtchnl2_add_queue_groups p2p_queue_grps_info;
+ uint8_t p2p_q_vc_out_info[IDPF_DFLT_MBX_BUF_SIZE] = {0};
int ret = 0;
dev->dev_ops = &cpfl_eth_dev_ops;
@@ -1327,6 +1438,28 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
&dev->data->mac_addrs[0]);
+ if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
+ memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
+ ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to add p2p queue group.");
+ return 0;
+ }
+ cpfl_vport->p2p_q_chunks_info = rte_zmalloc(NULL,
+ sizeof(struct p2p_queue_chunks_info), 0);
+ if (cpfl_vport->p2p_q_chunks_info == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate p2p queue info.");
+ cpfl_p2p_queue_grps_del(vport);
+ return 0;
+ }
+ ret = cpfl_p2p_queue_info_init(cpfl_vport,
+ (struct virtchnl2_add_queue_groups *)p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to init p2p queue info.");
+ cpfl_p2p_queue_grps_del(vport);
+ }
+ }
+
return 0;
err_mac_addrs:
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 81fe9ac4c3..666d46a44a 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -56,6 +56,7 @@
/* Device IDs */
#define IDPF_DEV_ID_CPF 0x1453
+#define VIRTCHNL2_QUEUE_GROUP_P2P 0x100
struct cpfl_vport_param {
struct cpfl_adapter_ext *adapter;
@@ -69,8 +70,25 @@ struct cpfl_devargs {
uint16_t req_vport_nb;
};
+struct p2p_queue_chunks_info {
+ uint32_t tx_start_qid;
+ uint32_t rx_start_qid;
+ uint32_t tx_compl_start_qid;
+ uint32_t rx_buf_start_qid;
+
+ uint64_t tx_qtail_start;
+ uint32_t tx_qtail_spacing;
+ uint64_t rx_qtail_start;
+ uint32_t rx_qtail_spacing;
+ uint64_t tx_compl_qtail_start;
+ uint32_t tx_compl_qtail_spacing;
+ uint64_t rx_buf_qtail_start;
+ uint32_t rx_buf_qtail_spacing;
+};
+
struct cpfl_vport {
struct idpf_vport base;
+ struct p2p_queue_chunks_info *p2p_q_chunks_info;
};
struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index bfb9ad97bd..1fe65778f0 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -13,6 +13,13 @@
#define CPFL_MIN_RING_DESC 32
#define CPFL_MAX_RING_DESC 4096
#define CPFL_DMA_MEM_ALIGN 4096
+
+#define CPFL_MAX_P2P_NB_QUEUES 16
+#define CPFL_P2P_NB_RX_BUFQ 1
+#define CPFL_P2P_NB_TX_COMPLQ 1
+#define CPFL_P2P_NB_QUEUE_GRPS 1
+#define CPFL_P2P_QUEUE_GRP_ID 1
+
/* Base address of the HW descriptor ring should be 128B aligned. */
#define CPFL_RING_BASE_ALIGN 128
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v8 04/14] net/cpfl: support hairpin queue capability get
2023-06-05 6:17 ` [PATCH v8 00/14] net/cpfl: add hairpin queue support beilei.xing
` (2 preceding siblings ...)
2023-06-05 6:17 ` [PATCH v8 03/14] net/cpfl: add hairpin queue group during vport init beilei.xing
@ 2023-06-05 6:17 ` beilei.xing
2023-06-05 6:17 ` [PATCH v8 05/14] net/cpfl: support hairpin queue setup and release beilei.xing
` (10 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 6:17 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch adds hairpin_cap_get ops support.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 18 ++++++++++++++++++
drivers/net/cpfl/cpfl_rxtx.h | 3 +++
2 files changed, 21 insertions(+)
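Usage note (application side, not part of this patch): with this ops in place an application can probe hairpin support before setting queues up, as in the sketch below, which relies only on the generic ethdev API.

#include <stdio.h>
#include <rte_ethdev.h>

static int
example_check_hairpin_cap(uint16_t port_id)
{
	struct rte_eth_hairpin_cap cap;
	int ret;

	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
	if (ret != 0)
		return ret; /* -ENOTSUP when no P2P queue group is available */

	printf("port %u: hairpin queues %u, max desc %u\n",
	       port_id, cap.max_nb_queues, cap.max_nb_desc);
	return 0;
}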
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index c1273a7478..40b4515539 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -154,6 +154,23 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
return rte_eth_linkstatus_set(dev, &new_link);
}
+static int
+cpfl_hairpin_cap_get(struct rte_eth_dev *dev,
+ struct rte_eth_hairpin_cap *cap)
+{
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+
+ if (cpfl_vport->p2p_q_chunks_info == NULL)
+ return -ENOTSUP;
+
+ cap->max_nb_queues = CPFL_MAX_P2P_NB_QUEUES;
+ cap->max_rx_2_tx = CPFL_MAX_HAIRPINQ_RX_2_TX;
+ cap->max_tx_2_rx = CPFL_MAX_HAIRPINQ_TX_2_RX;
+ cap->max_nb_desc = CPFL_MAX_HAIRPINQ_NB_DESC;
+
+ return 0;
+}
+
static int
cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
@@ -904,6 +921,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.xstats_get = cpfl_dev_xstats_get,
.xstats_get_names = cpfl_dev_xstats_get_names,
.xstats_reset = cpfl_dev_xstats_reset,
+ .hairpin_cap_get = cpfl_hairpin_cap_get,
};
static int
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 1fe65778f0..a4a164d462 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -14,6 +14,9 @@
#define CPFL_MAX_RING_DESC 4096
#define CPFL_DMA_MEM_ALIGN 4096
+#define CPFL_MAX_HAIRPINQ_RX_2_TX 1
+#define CPFL_MAX_HAIRPINQ_TX_2_RX 1
+#define CPFL_MAX_HAIRPINQ_NB_DESC 1024
#define CPFL_MAX_P2P_NB_QUEUES 16
#define CPFL_P2P_NB_RX_BUFQ 1
#define CPFL_P2P_NB_TX_COMPLQ 1
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v8 05/14] net/cpfl: support hairpin queue setup and release
2023-06-05 6:17 ` [PATCH v8 00/14] net/cpfl: add hairpin queue support beilei.xing
` (3 preceding siblings ...)
2023-06-05 6:17 ` [PATCH v8 04/14] net/cpfl: support hairpin queue capability get beilei.xing
@ 2023-06-05 6:17 ` beilei.xing
2023-06-05 6:17 ` [PATCH v8 06/14] common/idpf: add queue config API beilei.xing
` (9 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 6:17 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
Support hairpin Rx/Tx queue setup and release.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 6 +
drivers/net/cpfl/cpfl_ethdev.h | 11 +
drivers/net/cpfl/cpfl_rxtx.c | 364 +++++++++++++++++++++++-
drivers/net/cpfl/cpfl_rxtx.h | 36 +++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 4 +
5 files changed, 420 insertions(+), 1 deletion(-)
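Usage note (application side, not part of this patch): a minimal sketch of setting up one hairpin queue pair that loops a port back to itself, assuming hp_qid indexes a queue after the data queues, the same index is used on the Rx and Tx side, and binding is left to dev_start (manual_bind == 0).

#include <rte_ethdev.h>

static int
example_hairpin_queue_setup(uint16_t port_id, uint16_t hp_qid, uint16_t nb_desc)
{
	struct rte_eth_hairpin_conf conf = {0};
	int ret;

	conf.peer_count = 1;
	conf.manual_bind = 0;          /* let dev_start bind the queues */
	conf.tx_explicit = 0;
	conf.peers[0].port = port_id;  /* loop back to the same port */
	conf.peers[0].queue = hp_qid;

	ret = rte_eth_rx_hairpin_queue_setup(port_id, hp_qid, nb_desc, &conf);
	if (ret != 0)
		return ret;

	return rte_eth_tx_hairpin_queue_setup(port_id, hp_qid, nb_desc, &conf);
}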
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 40b4515539..b17c538ec2 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -879,6 +879,10 @@ cpfl_dev_close(struct rte_eth_dev *dev)
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
+ if (cpfl_vport->p2p_mp) {
+ rte_mempool_free(cpfl_vport->p2p_mp);
+ cpfl_vport->p2p_mp = NULL;
+ }
if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
cpfl_p2p_queue_grps_del(vport);
@@ -922,6 +926,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.xstats_get_names = cpfl_dev_xstats_get_names,
.xstats_reset = cpfl_dev_xstats_reset,
.hairpin_cap_get = cpfl_hairpin_cap_get,
+ .rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
+ .tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
};
static int
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 666d46a44a..2e42354f70 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -89,6 +89,17 @@ struct p2p_queue_chunks_info {
struct cpfl_vport {
struct idpf_vport base;
struct p2p_queue_chunks_info *p2p_q_chunks_info;
+
+ struct rte_mempool *p2p_mp;
+
+ uint16_t nb_data_rxq;
+ uint16_t nb_data_txq;
+ uint16_t nb_p2p_rxq;
+ uint16_t nb_p2p_txq;
+
+ struct idpf_rx_queue *p2p_rx_bufq;
+ struct idpf_tx_queue *p2p_tx_complq;
+ bool p2p_manual_bind;
};
struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 04a51b8d15..90b408d1f4 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -10,6 +10,67 @@
#include "cpfl_rxtx.h"
#include "cpfl_rxtx_vec_common.h"
+static inline void
+cpfl_tx_hairpin_descq_reset(struct idpf_tx_queue *txq)
+{
+ uint32_t i, size;
+
+ if (!txq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+ return;
+ }
+
+ size = txq->nb_tx_desc * CPFL_P2P_DESC_LEN;
+ for (i = 0; i < size; i++)
+ ((volatile char *)txq->desc_ring)[i] = 0;
+}
+
+static inline void
+cpfl_tx_hairpin_complq_reset(struct idpf_tx_queue *cq)
+{
+ uint32_t i, size;
+
+ if (!cq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to complq is NULL");
+ return;
+ }
+
+ size = cq->nb_tx_desc * CPFL_P2P_DESC_LEN;
+ for (i = 0; i < size; i++)
+ ((volatile char *)cq->compl_ring)[i] = 0;
+}
+
+static inline void
+cpfl_rx_hairpin_descq_reset(struct idpf_rx_queue *rxq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxq)
+ return;
+
+ len = rxq->nb_rx_desc;
+ for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
+ ((volatile char *)rxq->rx_ring)[i] = 0;
+}
+
+static inline void
+cpfl_rx_hairpin_bufq_reset(struct idpf_rx_queue *rxbq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxbq)
+ return;
+
+ len = rxbq->nb_rx_desc;
+ for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
+ ((volatile char *)rxbq->rx_ring)[i] = 0;
+
+ rxbq->bufq1 = NULL;
+ rxbq->bufq2 = NULL;
+}
+
static uint64_t
cpfl_rx_offload_convert(uint64_t offload)
{
@@ -234,7 +295,10 @@ cpfl_rx_queue_release(void *rxq)
/* Split queue */
if (!q->adapter->is_rx_singleq) {
- if (q->bufq2)
+ /* The mz is shared between the Tx/Rx hairpin queues, so let the Rx
+ * release path free the buffers, q->bufq1->mz and q->mz.
+ */
+ if (!cpfl_rxq->hairpin_info.hairpin_q && q->bufq2)
cpfl_rx_split_bufq_release(q->bufq2);
if (q->bufq1)
@@ -385,6 +449,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
}
+ cpfl_vport->nb_data_rxq++;
rxq->q_set = true;
dev->data->rx_queues[queue_idx] = cpfl_rxq;
@@ -548,6 +613,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
queue_idx * vport->chunks_info.tx_qtail_spacing);
txq->ops = &def_txq_ops;
+ cpfl_vport->nb_data_txq++;
txq->q_set = true;
dev->data->tx_queues[queue_idx] = cpfl_txq;
@@ -562,6 +628,300 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return ret;
}
+static int
+cpfl_rx_hairpin_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,
+ uint16_t logic_qid, uint16_t nb_desc)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct rte_mempool *mp;
+ char pool_name[RTE_MEMPOOL_NAMESIZE];
+
+ mp = cpfl_vport->p2p_mp;
+ if (!mp) {
+ snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "p2p_mb_pool_%u",
+ dev->data->port_id);
+ mp = rte_pktmbuf_pool_create(pool_name, CPFL_P2P_NB_MBUF * CPFL_MAX_P2P_NB_QUEUES,
+ CPFL_P2P_CACHE_SIZE, 0, CPFL_P2P_MBUF_SIZE,
+ dev->device->numa_node);
+ if (!mp) {
+ PMD_INIT_LOG(ERR, "Failed to allocate mbuf pool for p2p");
+ return -ENOMEM;
+ }
+ cpfl_vport->p2p_mp = mp;
+ }
+
+ bufq->mp = mp;
+ bufq->nb_rx_desc = nb_desc;
+ bufq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_buf_start_qid,
+ logic_qid);
+ bufq->port_id = dev->data->port_id;
+ bufq->adapter = adapter;
+ bufq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
+
+ bufq->q_set = true;
+ bufq->ops = &def_rxq_ops;
+
+ return 0;
+}
+
+int
+cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf)
+{
+ struct cpfl_vport *cpfl_vport = (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter_base = vport->adapter;
+ uint16_t logic_qid = cpfl_vport->nb_p2p_rxq;
+ struct cpfl_rxq_hairpin_info *hairpin_info;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct idpf_rx_queue *bufq1 = NULL;
+ struct idpf_rx_queue *rxq;
+ uint16_t peer_port, peer_q;
+ uint16_t qid;
+ int ret;
+
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin queue.");
+ return -EINVAL;
+ }
+
+ if (conf->peer_count != 1) {
+ PMD_INIT_LOG(ERR, "Can't support Rx hairpin queue peer count %d", conf->peer_count);
+ return -EINVAL;
+ }
+
+ peer_port = conf->peers[0].port;
+ peer_q = conf->peers[0].queue;
+
+ if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
+ nb_desc > CPFL_MAX_RING_DESC ||
+ nb_desc < CPFL_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is invalid", nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory if needed */
+ if (dev->data->rx_queues[queue_idx]) {
+ cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* Setup Rx description queue */
+ cpfl_rxq = rte_zmalloc_socket("cpfl hairpin rxq",
+ sizeof(struct cpfl_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!cpfl_rxq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
+ return -ENOMEM;
+ }
+
+ rxq = &cpfl_rxq->base;
+ hairpin_info = &cpfl_rxq->hairpin_info;
+ rxq->nb_rx_desc = nb_desc * 2;
+ rxq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid, logic_qid);
+ rxq->port_id = dev->data->port_id;
+ rxq->adapter = adapter_base;
+ rxq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
+ hairpin_info->hairpin_q = true;
+ hairpin_info->peer_txp = peer_port;
+ hairpin_info->peer_txq_id = peer_q;
+
+ if (conf->manual_bind != 0)
+ cpfl_vport->p2p_manual_bind = true;
+ else
+ cpfl_vport->p2p_manual_bind = false;
+
+ if (cpfl_vport->p2p_rx_bufq == NULL) {
+ bufq1 = rte_zmalloc_socket("hairpin rx bufq1",
+ sizeof(struct idpf_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!bufq1) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for hairpin Rx buffer queue 1.");
+ ret = -ENOMEM;
+ goto err_alloc_bufq1;
+ }
+ qid = 2 * logic_qid;
+ ret = cpfl_rx_hairpin_bufq_setup(dev, bufq1, qid, nb_desc);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to setup hairpin Rx buffer queue 1");
+ ret = -EINVAL;
+ goto err_setup_bufq1;
+ }
+ cpfl_vport->p2p_rx_bufq = bufq1;
+ }
+
+ rxq->bufq1 = cpfl_vport->p2p_rx_bufq;
+ rxq->bufq2 = NULL;
+
+ cpfl_vport->nb_p2p_rxq++;
+ rxq->q_set = true;
+ dev->data->rx_queues[queue_idx] = cpfl_rxq;
+
+ return 0;
+
+err_setup_bufq1:
+ rte_mempool_free(cpfl_vport->p2p_mp);
+ rte_free(bufq1);
+err_alloc_bufq1:
+ rte_free(cpfl_rxq);
+
+ return ret;
+}
+
+int
+cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter_base = vport->adapter;
+ uint16_t logic_qid = cpfl_vport->nb_p2p_txq;
+ struct cpfl_txq_hairpin_info *hairpin_info;
+ struct idpf_hw *hw = &adapter_base->hw;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct idpf_tx_queue *txq, *cq;
+ const struct rte_memzone *mz;
+ uint32_t ring_size;
+ uint16_t peer_port, peer_q;
+ int ret;
+
+ if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin queue.");
+ return -EINVAL;
+ }
+
+ if (conf->peer_count != 1) {
+ PMD_INIT_LOG(ERR, "Can't support Tx hairpin queue peer count %d", conf->peer_count);
+ return -EINVAL;
+ }
+
+ peer_port = conf->peers[0].port;
+ peer_q = conf->peers[0].queue;
+
+ if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
+ nb_desc > CPFL_MAX_RING_DESC ||
+ nb_desc < CPFL_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is invalid",
+ nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory if needed. */
+ if (dev->data->tx_queues[queue_idx]) {
+ cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the TX queue data structure. */
+ cpfl_txq = rte_zmalloc_socket("cpfl hairpin txq",
+ sizeof(struct cpfl_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!cpfl_txq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+ return -ENOMEM;
+ }
+
+ txq = &cpfl_txq->base;
+ hairpin_info = &cpfl_txq->hairpin_info;
+ /* Txq ring length should be twice the Tx completion queue size. */
+ txq->nb_tx_desc = nb_desc * 2;
+ txq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_start_qid, logic_qid);
+ txq->port_id = dev->data->port_id;
+ hairpin_info->hairpin_q = true;
+ hairpin_info->peer_rxp = peer_port;
+ hairpin_info->peer_rxq_id = peer_q;
+
+ if (conf->manual_bind != 0)
+ cpfl_vport->p2p_manual_bind = true;
+ else
+ cpfl_vport->p2p_manual_bind = false;
+
+ /* A Tx hairpin queue always allocates the Tx HW ring */
+ ring_size = RTE_ALIGN(txq->nb_tx_desc * CPFL_P2P_DESC_LEN,
+ CPFL_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_ring", logic_qid,
+ ring_size + CPFL_P2P_RING_BUF,
+ CPFL_RING_BASE_ALIGN,
+ dev->device->numa_node);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
+ ret = -ENOMEM;
+ goto err_txq_mz_rsv;
+ }
+
+ txq->tx_ring_phys_addr = mz->iova;
+ txq->desc_ring = mz->addr;
+ txq->mz = mz;
+
+ cpfl_tx_hairpin_descq_reset(txq);
+ txq->qtx_tail = hw->hw_addr +
+ cpfl_hw_qtail_get(cpfl_vport->p2p_q_chunks_info->tx_qtail_start,
+ logic_qid, cpfl_vport->p2p_q_chunks_info->tx_qtail_spacing);
+ txq->ops = &def_txq_ops;
+
+ if (cpfl_vport->p2p_tx_complq == NULL) {
+ cq = rte_zmalloc_socket("cpfl hairpin cq",
+ sizeof(struct idpf_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ dev->device->numa_node);
+ if (!cq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+ ret = -ENOMEM;
+ goto err_cq_alloc;
+ }
+
+ cq->nb_tx_desc = nb_desc;
+ cq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_compl_start_qid,
+ 0);
+ cq->port_id = dev->data->port_id;
+
+ /* Tx completion queue always allocates the HW ring */
+ ring_size = RTE_ALIGN(cq->nb_tx_desc * CPFL_P2P_DESC_LEN,
+ CPFL_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_compl_ring", logic_qid,
+ ring_size + CPFL_P2P_RING_BUF,
+ CPFL_RING_BASE_ALIGN,
+ dev->device->numa_node);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX completion queue");
+ ret = -ENOMEM;
+ goto err_cq_mz_rsv;
+ }
+ cq->tx_ring_phys_addr = mz->iova;
+ cq->compl_ring = mz->addr;
+ cq->mz = mz;
+
+ cpfl_tx_hairpin_complq_reset(cq);
+ cpfl_vport->p2p_tx_complq = cq;
+ }
+
+ txq->complq = cpfl_vport->p2p_tx_complq;
+
+ cpfl_vport->nb_p2p_txq++;
+ txq->q_set = true;
+ dev->data->tx_queues[queue_idx] = cpfl_txq;
+
+ return 0;
+
+err_cq_mz_rsv:
+ rte_free(cq);
+err_cq_alloc:
+ cpfl_dma_zone_release(mz);
+err_txq_mz_rsv:
+ rte_free(cpfl_txq);
+ return ret;
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
@@ -865,6 +1225,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ continue;
(void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index a4a164d462..06198d4aad 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -22,6 +22,11 @@
#define CPFL_P2P_NB_TX_COMPLQ 1
#define CPFL_P2P_NB_QUEUE_GRPS 1
#define CPFL_P2P_QUEUE_GRP_ID 1
+#define CPFL_P2P_DESC_LEN 16
+#define CPFL_P2P_NB_MBUF 4096
+#define CPFL_P2P_CACHE_SIZE 250
+#define CPFL_P2P_MBUF_SIZE 2048
+#define CPFL_P2P_RING_BUF 128
/* Base address of the HW descriptor ring should be 128B aligned. */
#define CPFL_RING_BASE_ALIGN 128
@@ -33,14 +38,40 @@
#define CPFL_SUPPORT_CHAIN_NUM 5
+struct cpfl_rxq_hairpin_info {
+ bool hairpin_q; /* if rx queue is a hairpin queue */
+ uint16_t peer_txp;
+ uint16_t peer_txq_id;
+};
+
struct cpfl_rx_queue {
struct idpf_rx_queue base;
+ struct cpfl_rxq_hairpin_info hairpin_info;
+};
+
+struct cpfl_txq_hairpin_info {
+ bool hairpin_q; /* if tx queue is a hairpin queue */
+ uint16_t peer_rxp;
+ uint16_t peer_rxq_id;
};
struct cpfl_tx_queue {
struct idpf_tx_queue base;
+ struct cpfl_txq_hairpin_info hairpin_info;
};
+static inline uint16_t
+cpfl_hw_qid_get(uint16_t start_qid, uint16_t offset)
+{
+ return start_qid + offset;
+}
+
+static inline uint64_t
+cpfl_hw_qtail_get(uint64_t tail_start, uint16_t offset, uint64_t tail_spacing)
+{
+ return tail_start + offset * tail_spacing;
+}
+
int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
@@ -59,4 +90,9 @@ void cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void cpfl_set_rx_function(struct rte_eth_dev *dev);
void cpfl_set_tx_function(struct rte_eth_dev *dev);
+int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, const struct rte_eth_hairpin_conf *conf);
+int cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf);
#endif /* _CPFL_RXTX_H_ */
diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
index 5690b17911..d8e9191196 100644
--- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
+++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
@@ -85,6 +85,8 @@ cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)
cpfl_rxq = dev->data->rx_queues[i];
default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ continue;
splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base);
ret = splitq_ret && default_ret;
} else {
@@ -106,6 +108,8 @@ cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_tx_queues; i++) {
cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq->hairpin_info.hairpin_q)
+ continue;
ret = cpfl_tx_vec_queue_default(&cpfl_txq->base);
if (ret == CPFL_SCALAR_PATH)
return CPFL_SCALAR_PATH;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v8 06/14] common/idpf: add queue config API
2023-06-05 6:17 ` [PATCH v8 00/14] net/cpfl: add hairpin queue support beilei.xing
` (4 preceding siblings ...)
2023-06-05 6:17 ` [PATCH v8 05/14] net/cpfl: support hairpin queue setup and release beilei.xing
@ 2023-06-05 6:17 ` beilei.xing
2023-06-05 6:17 ` [PATCH v8 07/14] net/cpfl: support hairpin queue configuration beilei.xing
` (8 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 6:17 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds Rx/Tx queue configuration APIs that take caller-provided queue info.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 70 ++++++++++++++++++++++
drivers/common/idpf/idpf_common_virtchnl.h | 6 ++
drivers/common/idpf/version.map | 2 +
3 files changed, 78 insertions(+)
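Usage note (illustrative only, not part of this patch): the new *_by_info variants let a caller hand a fully built queue description to the virtchnl layer. A minimal Rx sketch follows; the hairpin configuration in the next patch fills in the remaining fields (buffer queue ids, descriptor ids, watermarks and so on).

#include "idpf_common_virtchnl.h"

static int
example_rxq_config_by_info(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
{
	struct virtchnl2_rxq_info rxq_info[1] = {0};

	rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX;
	rxq_info[0].queue_id = rxq->queue_id;
	rxq_info[0].ring_len = rxq->nb_rx_desc;
	rxq_info[0].dma_ring_addr = rxq->rx_ring_phys_addr;
	rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
	rxq_info[0].data_buffer_size = rxq->rx_buf_len;

	return idpf_vc_rxq_config_by_info(vport, rxq_info, 1);
}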
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index a3fe55c897..211b44a88e 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -1050,6 +1050,41 @@ idpf_vc_rxq_config(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
return err;
}
+int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct virtchnl2_rxq_info *rxq_info,
+ uint16_t num_qs)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+ struct idpf_cmd_info args;
+ int size, err, i;
+
+ size = sizeof(*vc_rxqs) + (num_qs - 1) *
+ sizeof(struct virtchnl2_rxq_info);
+ vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+ if (vc_rxqs == NULL) {
+ DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_rxqs->vport_id = vport->vport_id;
+ vc_rxqs->num_qinfo = num_qs;
+ memcpy(vc_rxqs->qinfo, rxq_info, num_qs * sizeof(struct virtchnl2_rxq_info));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+ args.in_args = (uint8_t *)vc_rxqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ rte_free(vc_rxqs);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+ return err;
+}
+
int
idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
{
@@ -1121,6 +1156,41 @@ idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
return err;
}
+int
+idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
+ uint16_t num_qs)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+ struct idpf_cmd_info args;
+ int size, err;
+
+ size = sizeof(*vc_txqs) + (num_qs - 1) * sizeof(struct virtchnl2_txq_info);
+ vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+ if (vc_txqs == NULL) {
+ DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_txqs->vport_id = vport->vport_id;
+ vc_txqs->num_qinfo = num_qs;
+ memcpy(vc_txqs->qinfo, txq_info, num_qs * sizeof(struct virtchnl2_txq_info));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+ args.in_args = (uint8_t *)vc_txqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ rte_free(vc_txqs);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+ return err;
+}
+
int
idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
struct idpf_ctlq_msg *q_msg)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index 58b16e1c5d..db83761a5e 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -65,6 +65,12 @@ __rte_internal
int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 *buff_count, struct idpf_dma_mem **buffs);
__rte_internal
+int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct virtchnl2_rxq_info *rxq_info,
+ uint16_t num_qs);
+__rte_internal
+int idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
+ uint16_t num_qs);
+__rte_internal
int idpf_vc_queue_grps_del(struct idpf_vport *vport,
uint16_t num_q_grps,
struct virtchnl2_queue_group_id *qg_ids);
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 01d18f3f3f..17e77884ce 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -54,8 +54,10 @@ INTERNAL {
idpf_vc_rss_lut_get;
idpf_vc_rss_lut_set;
idpf_vc_rxq_config;
+ idpf_vc_rxq_config_by_info;
idpf_vc_stats_query;
idpf_vc_txq_config;
+ idpf_vc_txq_config_by_info;
idpf_vc_vectors_alloc;
idpf_vc_vectors_dealloc;
idpf_vc_vport_create;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v8 07/14] net/cpfl: support hairpin queue configuration
2023-06-05 6:17 ` [PATCH v8 00/14] net/cpfl: add hairpin queue support beilei.xing
` (5 preceding siblings ...)
2023-06-05 6:17 ` [PATCH v8 06/14] common/idpf: add queue config API beilei.xing
@ 2023-06-05 6:17 ` beilei.xing
2023-06-05 6:17 ` [PATCH v8 08/14] common/idpf: add switch queue API beilei.xing
` (7 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 6:17 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports Rx/Tx hairpin queue configuration.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 136 +++++++++++++++++++++++++++++++--
drivers/net/cpfl/cpfl_rxtx.c | 80 +++++++++++++++++++
drivers/net/cpfl/cpfl_rxtx.h | 7 ++
3 files changed, 217 insertions(+), 6 deletions(-)
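Reading aid (not part of this patch): for queues that are not manually bound, cpfl_start_queues() applies the hairpin configuration in a fixed order. Stripped of error logging and of the peer-info and memzone fix-ups, that order boils down to the following condensed sketch.

#include "cpfl_ethdev.h"
#include "cpfl_rxtx.h"

static int
example_hairpin_config_order(struct rte_eth_dev *dev, struct cpfl_vport *cpfl_vport)
{
	struct idpf_vport *vport = &cpfl_vport->base;
	int i, err;

	/* 1. Hairpin Tx queues (after updating their peer Rx queue ids). */
	for (i = cpfl_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
		err = cpfl_hairpin_txq_config(vport, dev->data->tx_queues[i]);
		if (err != 0)
			return err;
	}
	/* 2. The shared Tx completion queue. */
	err = cpfl_hairpin_tx_complq_config(cpfl_vport);
	if (err != 0)
		return err;
	/* 3. The shared Rx buffer queue (its memzone is bound to the Tx side first). */
	err = cpfl_hairpin_rx_bufq_config(cpfl_vport);
	if (err != 0)
		return err;
	/* 4. Hairpin Rx queues, each followed by its queue init. */
	for (i = cpfl_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++) {
		err = cpfl_hairpin_rxq_config(vport, dev->data->rx_queues[i]);
		if (err != 0)
			return err;
	}
	return 0;
}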
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index b17c538ec2..a06def06d0 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -742,33 +742,157 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
return idpf_vport_irq_map_config(vport, nb_rx_queues);
}
+/* Update hairpin_info for dev's tx hairpin queue */
+static int
+cpfl_txq_hairpin_info_update(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+ struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+ struct cpfl_txq_hairpin_info *hairpin_info;
+ struct cpfl_tx_queue *cpfl_txq;
+ int i;
+
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ hairpin_info = &cpfl_txq->hairpin_info;
+ if (hairpin_info->peer_rxp != rx_port) {
+ PMD_DRV_LOG(ERR, "port %d is not the peer port", rx_port);
+ return -EINVAL;
+ }
+ hairpin_info->peer_rxq_id =
+ cpfl_hw_qid_get(cpfl_rx_vport->p2p_q_chunks_info->rx_start_qid,
+ hairpin_info->peer_rxq_id - cpfl_rx_vport->nb_data_rxq);
+ }
+
+ return 0;
+}
+
+/* Bind Rx hairpin queue's memory zone to peer Tx hairpin queue's memory zone */
+static void
+cpfl_rxq_hairpin_mz_bind(struct rte_eth_dev *dev)
+{
+ struct cpfl_vport *cpfl_rx_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_rx_vport->base;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_hw *hw = &adapter->hw;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct rte_eth_dev *peer_dev;
+ const struct rte_memzone *mz;
+ uint16_t peer_tx_port;
+ uint16_t peer_tx_qid;
+ int i;
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ peer_tx_port = cpfl_rxq->hairpin_info.peer_txp;
+ peer_tx_qid = cpfl_rxq->hairpin_info.peer_txq_id;
+ peer_dev = &rte_eth_devices[peer_tx_port];
+ cpfl_txq = peer_dev->data->tx_queues[peer_tx_qid];
+
+ /* bind rx queue */
+ mz = cpfl_txq->base.mz;
+ cpfl_rxq->base.rx_ring_phys_addr = mz->iova;
+ cpfl_rxq->base.rx_ring = mz->addr;
+ cpfl_rxq->base.mz = mz;
+
+ /* bind rx buffer queue */
+ mz = cpfl_txq->base.complq->mz;
+ cpfl_rxq->base.bufq1->rx_ring_phys_addr = mz->iova;
+ cpfl_rxq->base.bufq1->rx_ring = mz->addr;
+ cpfl_rxq->base.bufq1->mz = mz;
+ cpfl_rxq->base.bufq1->qrx_tail = hw->hw_addr +
+ cpfl_hw_qtail_get(cpfl_rx_vport->p2p_q_chunks_info->rx_buf_qtail_start,
+ 0, cpfl_rx_vport->p2p_q_chunks_info->rx_buf_qtail_spacing);
+ }
+}
+
static int
cpfl_start_queues(struct rte_eth_dev *dev)
{
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_rx_queue *cpfl_rxq;
struct cpfl_tx_queue *cpfl_txq;
+ int update_flag = 0;
int err = 0;
int i;
+ /* For normal data queues, configure, init and enable Txq.
+ * For non-manual bind hairpin queues, configure Txq.
+ */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
cpfl_txq = dev->data->tx_queues[i];
if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
continue;
- err = cpfl_tx_queue_start(dev, i);
+ if (!cpfl_txq->hairpin_info.hairpin_q) {
+ err = cpfl_tx_queue_start(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+ return err;
+ }
+ } else if (!cpfl_vport->p2p_manual_bind) {
+ if (update_flag == 0) {
+ err = cpfl_txq_hairpin_info_update(dev,
+ cpfl_txq->hairpin_info.peer_rxp);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info");
+ return err;
+ }
+ update_flag = 1;
+ }
+ err = cpfl_hairpin_txq_config(vport, cpfl_txq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+ return err;
+ }
+ }
+ }
+
+ /* For non-manual bind hairpin queues, configure Tx completion queue first.*/
+ if (!cpfl_vport->p2p_manual_bind && cpfl_vport->p2p_tx_complq != NULL) {
+ err = cpfl_hairpin_tx_complq_config(cpfl_vport);
if (err != 0) {
- PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+ PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
return err;
}
}
+ /* For non-manual bind hairpin queues, configure Rx buffer queue.*/
+ if (!cpfl_vport->p2p_manual_bind && cpfl_vport->p2p_rx_bufq != NULL) {
+ cpfl_rxq_hairpin_mz_bind(dev);
+ err = cpfl_hairpin_rx_bufq_config(cpfl_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
+ return err;
+ }
+ }
+
+ /* For normal data queues, configure, init and enable Rxq.
+ * For non-manual bind hairpin queues, configure Rxq, and then init Rxq.
+ */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
continue;
- err = cpfl_rx_queue_start(dev, i);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
- return err;
+ if (!cpfl_rxq->hairpin_info.hairpin_q) {
+ err = cpfl_rx_queue_start(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
+ return err;
+ }
+ } else if (!cpfl_vport->p2p_manual_bind) {
+ err = cpfl_hairpin_rxq_config(vport, cpfl_rxq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
+ return err;
+ }
+ err = cpfl_rx_queue_init(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+ return err;
+ }
}
}
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 90b408d1f4..9408c6e1a4 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -922,6 +922,86 @@ cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return ret;
}
+int
+cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport)
+{
+ struct idpf_rx_queue *rx_bufq = cpfl_vport->p2p_rx_bufq;
+ struct virtchnl2_rxq_info rxq_info[1] = {0};
+
+ rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ rxq_info[0].queue_id = rx_bufq->queue_id;
+ rxq_info[0].ring_len = rx_bufq->nb_rx_desc;
+ rxq_info[0].dma_ring_addr = rx_bufq->rx_ring_phys_addr;
+ rxq_info[0].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info[0].rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
+ rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ rxq_info[0].data_buffer_size = rx_bufq->rx_buf_len;
+ rxq_info[0].buffer_notif_stride = CPFL_RX_BUF_STRIDE;
+
+ return idpf_vc_rxq_config_by_info(&cpfl_vport->base, rxq_info, 1);
+}
+
+int
+cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq)
+{
+ struct virtchnl2_rxq_info rxq_info[1] = {0};
+ struct idpf_rx_queue *rxq = &cpfl_rxq->base;
+
+ rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX;
+ rxq_info[0].queue_id = rxq->queue_id;
+ rxq_info[0].ring_len = rxq->nb_rx_desc;
+ rxq_info[0].dma_ring_addr = rxq->rx_ring_phys_addr;
+ rxq_info[0].rx_bufq1_id = rxq->bufq1->queue_id;
+ rxq_info[0].max_pkt_size = vport->max_pkt_len;
+ rxq_info[0].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info[0].qflags |= VIRTCHNL2_RX_DESC_SIZE_16BYTE;
+
+ rxq_info[0].data_buffer_size = rxq->rx_buf_len;
+ rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ rxq_info[0].rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
+
+ PMD_DRV_LOG(NOTICE, "hairpin: vport %u, Rxq id 0x%x",
+ vport->vport_id, rxq_info[0].queue_id);
+
+ return idpf_vc_rxq_config_by_info(vport, rxq_info, 1);
+}
+
+int
+cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport)
+{
+ struct idpf_tx_queue *tx_complq = cpfl_vport->p2p_tx_complq;
+ struct virtchnl2_txq_info txq_info[1] = {0};
+
+ txq_info[0].dma_ring_addr = tx_complq->tx_ring_phys_addr;
+ txq_info[0].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ txq_info[0].queue_id = tx_complq->queue_id;
+ txq_info[0].ring_len = tx_complq->nb_tx_desc;
+ txq_info[0].peer_rx_queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+ txq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info[0].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+
+ return idpf_vc_txq_config_by_info(&cpfl_vport->base, txq_info, 1);
+}
+
+int
+cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq)
+{
+ struct idpf_tx_queue *txq = &cpfl_txq->base;
+ struct virtchnl2_txq_info txq_info[1] = {0};
+
+ txq_info[0].dma_ring_addr = txq->tx_ring_phys_addr;
+ txq_info[0].type = VIRTCHNL2_QUEUE_TYPE_TX;
+ txq_info[0].queue_id = txq->queue_id;
+ txq_info[0].ring_len = txq->nb_tx_desc;
+ txq_info[0].tx_compl_queue_id = txq->complq->queue_id;
+ txq_info[0].relative_queue_id = txq->queue_id;
+ txq_info[0].peer_rx_queue_id = cpfl_txq->hairpin_info.peer_rxq_id;
+ txq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info[0].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+
+ return idpf_vc_txq_config_by_info(vport, txq_info, 1);
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 06198d4aad..872ebc1bfd 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -32,12 +32,15 @@
#define CPFL_RING_BASE_ALIGN 128
#define CPFL_DEFAULT_RX_FREE_THRESH 32
+#define CPFL_RXBUF_LOW_WATERMARK 64
#define CPFL_DEFAULT_TX_RS_THRESH 32
#define CPFL_DEFAULT_TX_FREE_THRESH 32
#define CPFL_SUPPORT_CHAIN_NUM 5
+#define CPFL_RX_BUF_STRIDE 64
+
struct cpfl_rxq_hairpin_info {
bool hairpin_q; /* if rx queue is a hairpin queue */
uint16_t peer_txp;
@@ -95,4 +98,8 @@ int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
int cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc,
const struct rte_eth_hairpin_conf *conf);
+int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport);
+int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq);
+int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
+int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
#endif /* _CPFL_RXTX_H_ */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v8 08/14] common/idpf: add switch queue API
2023-06-05 6:17 ` [PATCH v8 00/14] net/cpfl: add hairpin queue support beilei.xing
` (6 preceding siblings ...)
2023-06-05 6:17 ` [PATCH v8 07/14] net/cpfl: support hairpin queue configuration beilei.xing
@ 2023-06-05 6:17 ` beilei.xing
2023-06-05 6:17 ` [PATCH v8 09/14] net/cpfl: support hairpin queue start/stop beilei.xing
` (6 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 6:17 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch exposes the existing idpf_vc_ena_dis_one_queue function as an internal API so that a single queue can be switched on or off.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 2 +-
drivers/common/idpf/idpf_common_virtchnl.h | 3 +++
drivers/common/idpf/version.map | 1 +
3 files changed, 5 insertions(+), 1 deletion(-)
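Usage note (illustrative only, not part of this patch): the exported helper toggles one queue by absolute queue id and queue type, as sketched below; the hairpin start/stop code in the next patch wraps it for the P2P queue types.

#include "idpf_common_virtchnl.h"

static int
example_toggle_one_rx_queue(struct idpf_vport *vport, uint16_t abs_qid, bool on)
{
	return idpf_vc_ena_dis_one_queue(vport, abs_qid,
					 VIRTCHNL2_QUEUE_TYPE_RX, on);
}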
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index 211b44a88e..6455f640da 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -733,7 +733,7 @@ idpf_vc_vectors_dealloc(struct idpf_vport *vport)
return err;
}
-static int
+int
idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
uint32_t type, bool on)
{
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index db83761a5e..9ff5c38c26 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -71,6 +71,9 @@ __rte_internal
int idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
uint16_t num_qs);
__rte_internal
+int idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
+ uint32_t type, bool on);
+__rte_internal
int idpf_vc_queue_grps_del(struct idpf_vport *vport,
uint16_t num_q_grps,
struct virtchnl2_queue_group_id *qg_ids);
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 17e77884ce..25624732b0 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -40,6 +40,7 @@ INTERNAL {
idpf_vc_cmd_execute;
idpf_vc_ctlq_post_rx_buffs;
idpf_vc_ctlq_recv;
+ idpf_vc_ena_dis_one_queue;
idpf_vc_irq_map_unmap_config;
idpf_vc_one_msg_read;
idpf_vc_ptype_info_query;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v8 09/14] net/cpfl: support hairpin queue start/stop
2023-06-05 6:17 ` [PATCH v8 00/14] net/cpfl: add hairpin queue support beilei.xing
` (7 preceding siblings ...)
2023-06-05 6:17 ` [PATCH v8 08/14] common/idpf: add switch queue API beilei.xing
@ 2023-06-05 6:17 ` beilei.xing
2023-06-05 6:17 ` [PATCH v8 10/14] common/idpf: add irq map config API beilei.xing
` (5 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 6:17 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports Rx/Tx hairpin queue start/stop.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 46 +++++++++
drivers/net/cpfl/cpfl_rxtx.c | 164 +++++++++++++++++++++++++++++----
drivers/net/cpfl/cpfl_rxtx.h | 15 +++
3 files changed, 207 insertions(+), 18 deletions(-)
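Usage note (application side, not part of this patch): with the hairpin queues set up as in the earlier example and manual_bind left at 0, rte_eth_dev_start() is enough to bring the hairpin path up; manual binding needs an explicit rte_eth_hairpin_bind() call, whose cpfl support is added later in this series. A minimal sketch:

#include <rte_ethdev.h>

static int
example_start_with_hairpin(uint16_t port_id, bool manual_bind, uint16_t peer_tx_port)
{
	int ret;

	ret = rte_eth_dev_start(port_id);
	if (ret != 0)
		return ret;

	/* Only needed when the hairpin queues were set up with manual_bind;
	 * for a single-port loop both arguments are the same port.
	 */
	if (manual_bind)
		ret = rte_eth_hairpin_bind(peer_tx_port, port_id);

	return ret;
}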
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index a06def06d0..2b99e58341 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -896,6 +896,52 @@ cpfl_start_queues(struct rte_eth_dev *dev)
}
}
+ /* For non-manual bind hairpin queues, enable Tx queue and Rx queue,
+ * then enable Tx completion queue and Rx buffer queue.
+ */
+ for (i = cpfl_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) {
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ i - cpfl_vport->nb_data_txq,
+ false, true);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on",
+ i);
+ else
+ cpfl_txq->base.q_started = true;
+ }
+ }
+
+ for (i = cpfl_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) {
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ i - cpfl_vport->nb_data_rxq,
+ true, true);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on",
+ i);
+ else
+ cpfl_rxq->base.q_started = true;
+ }
+ }
+
+ if (!cpfl_vport->p2p_manual_bind &&
+ cpfl_vport->p2p_tx_complq != NULL &&
+ cpfl_vport->p2p_rx_bufq != NULL) {
+ err = cpfl_switch_hairpin_complq(cpfl_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq");
+ return err;
+ }
+ err = cpfl_switch_hairpin_bufq(cpfl_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Rx bufq");
+ return err;
+ }
+ }
+
return err;
}
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 9408c6e1a4..8d1f8a560b 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -1002,6 +1002,89 @@ cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq
return idpf_vc_txq_config_by_info(vport, txq_info, 1);
}
+int
+cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ queue_id = cpfl_vport->p2p_tx_complq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+ return err;
+}
+
+int
+cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+ return err;
+}
+
+int
+cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t logic_qid,
+ bool rx, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = rx ? VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;
+
+ if (type == VIRTCHNL2_QUEUE_TYPE_RX)
+ queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid, logic_qid);
+ else
+ queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_start_qid, logic_qid);
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+ if (err)
+ return err;
+
+ return err;
+}
+
+static int
+cpfl_alloc_split_p2p_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+ volatile struct virtchnl2_p2p_rx_buf_desc *rxd;
+ struct rte_mbuf *mbuf = NULL;
+ uint64_t dma_addr;
+ uint16_t i;
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ mbuf = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!mbuf)) {
+ PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+ return -ENOMEM;
+ }
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+ rxd = &((volatile struct virtchnl2_p2p_rx_buf_desc *)(rxq->rx_ring))[i];
+ rxd->reserve0 = 0;
+ rxd->pkt_addr = dma_addr;
+ }
+
+ rxq->nb_rx_hold = 0;
+ /* The value written in the RX buffer queue tail register must be a multiple of 8. */
+ rxq->rx_tail = rxq->nb_rx_desc - CPFL_HAIRPIN_Q_TAIL_AUX_VALUE;
+
+ return 0;
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
@@ -1055,22 +1138,31 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
IDPF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
} else {
/* Split queue */
- err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
- return err;
- }
- err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
- return err;
+ if (cpfl_rxq->hairpin_info.hairpin_q) {
+ err = cpfl_alloc_split_p2p_rxq_mbufs(rxq->bufq1);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate p2p RX buffer queue mbuf");
+ return err;
+ }
+ } else {
+ err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+ return err;
+ }
+ err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+ return err;
+ }
}
rte_wmb();
/* Init the RX tail register. */
IDPF_PCI_REG_WRITE(rxq->bufq1->qrx_tail, rxq->bufq1->rx_tail);
- IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
+ if (rxq->bufq2)
+ IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
}
return err;
@@ -1177,7 +1269,12 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return -EINVAL;
cpfl_rxq = dev->data->rx_queues[rx_queue_id];
- err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ rx_queue_id - cpfl_vport->nb_data_txq,
+ true, false);
+ else
+ err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
rx_queue_id);
@@ -1191,10 +1288,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
idpf_qc_single_rx_queue_reset(rxq);
} else {
rxq->bufq1->ops->release_mbufs(rxq->bufq1);
- rxq->bufq2->ops->release_mbufs(rxq->bufq2);
- idpf_qc_split_rx_queue_reset(rxq);
+ if (rxq->bufq2)
+ rxq->bufq2->ops->release_mbufs(rxq->bufq2);
+ if (cpfl_rxq->hairpin_info.hairpin_q) {
+ cpfl_rx_hairpin_descq_reset(rxq);
+ cpfl_rx_hairpin_bufq_reset(rxq->bufq1);
+ } else {
+ idpf_qc_split_rx_queue_reset(rxq);
+ }
}
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ if (!cpfl_rxq->hairpin_info.hairpin_q)
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1213,7 +1317,12 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
cpfl_txq = dev->data->tx_queues[tx_queue_id];
- err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
+ if (cpfl_txq->hairpin_info.hairpin_q)
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ tx_queue_id - cpfl_vport->nb_data_txq,
+ false, false);
+ else
+ err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
tx_queue_id);
@@ -1226,10 +1335,17 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
idpf_qc_single_tx_queue_reset(txq);
} else {
- idpf_qc_split_tx_descq_reset(txq);
- idpf_qc_split_tx_complq_reset(txq->complq);
+ if (cpfl_txq->hairpin_info.hairpin_q) {
+ cpfl_tx_hairpin_descq_reset(txq);
+ cpfl_tx_hairpin_complq_reset(txq->complq);
+ } else {
+ idpf_qc_split_tx_descq_reset(txq);
+ idpf_qc_split_tx_complq_reset(txq->complq);
+ }
}
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ if (!cpfl_txq->hairpin_info.hairpin_q)
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1249,10 +1365,22 @@ cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
void
cpfl_stop_queues(struct rte_eth_dev *dev)
{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
struct cpfl_rx_queue *cpfl_rxq;
struct cpfl_tx_queue *cpfl_txq;
int i;
+ if (cpfl_vport->p2p_tx_complq != NULL) {
+ if (cpfl_switch_hairpin_complq(cpfl_vport, false) != 0)
+ PMD_DRV_LOG(ERR, "Failed to stop hairpin Tx complq");
+ }
+
+ if (cpfl_vport->p2p_rx_bufq != NULL) {
+ if (cpfl_switch_hairpin_bufq(cpfl_vport, false) != 0)
+ PMD_DRV_LOG(ERR, "Failed to stop hairpin Rx bufq");
+ }
+
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
if (cpfl_rxq == NULL)
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 872ebc1bfd..aacd087b56 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -41,6 +41,17 @@
#define CPFL_RX_BUF_STRIDE 64
+/* The value written in the RX buffer queue tail register,
+ * and in WritePTR field in the TX completion queue context,
+ * must be a multiple of 8.
+ */
+#define CPFL_HAIRPIN_Q_TAIL_AUX_VALUE 8
+
+struct virtchnl2_p2p_rx_buf_desc {
+ __le64 reserve0;
+ __le64 pkt_addr; /* Packet buffer address */
+};
+
struct cpfl_rxq_hairpin_info {
bool hairpin_q; /* if rx queue is a hairpin queue */
uint16_t peer_txp;
@@ -102,4 +113,8 @@ int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq);
int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
+int cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t qid,
+ bool rx, bool on);
#endif /* _CPFL_RXTX_H_ */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v8 10/14] common/idpf: add irq map config API
2023-06-05 6:17 ` [PATCH v8 00/14] net/cpfl: add hairpin queue support beilei.xing
` (8 preceding siblings ...)
2023-06-05 6:17 ` [PATCH v8 09/14] net/cpfl: support hairpin queue start/stop beilei.xing
@ 2023-06-05 6:17 ` beilei.xing
2023-06-05 6:17 ` [PATCH v8 11/14] net/cpfl: enable write back based on ITR expire beilei.xing
` (4 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 6:17 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds the idpf_vport_irq_map_config_by_qids API, which configures
the IRQ mapping using explicit queue IDs.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_device.c | 75 ++++++++++++++++++++++++
drivers/common/idpf/idpf_common_device.h | 4 ++
drivers/common/idpf/version.map | 1 +
3 files changed, 80 insertions(+)
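A minimal caller sketch for the new API (illustration only -- the wrapper
function, the EXAMPLE_MAX_RXQ cap and the assumption that a hardware queue
ID is simply the chunk start ID plus the queue index are not part of this
patch; the real caller is added later in this series):

#define EXAMPLE_MAX_RXQ 32	/* hypothetical bound, for the sketch only */

static int
example_irq_map_by_qids(struct idpf_vport *vport, uint16_t nb_rxq)
{
	uint32_t qids[EXAMPLE_MAX_RXQ];	/* absolute hardware queue IDs */
	uint16_t i;

	if (nb_rxq > EXAMPLE_MAX_RXQ)
		return -EINVAL;

	for (i = 0; i < nb_rxq; i++)
		qids[i] = vport->chunks_info.rx_start_qid + i;

	return idpf_vport_irq_map_config_by_qids(vport, qids, nb_rxq);
}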
diff --git a/drivers/common/idpf/idpf_common_device.c b/drivers/common/idpf/idpf_common_device.c
index dc47551b17..cc4207a46e 100644
--- a/drivers/common/idpf/idpf_common_device.c
+++ b/drivers/common/idpf/idpf_common_device.c
@@ -667,6 +667,81 @@ idpf_vport_irq_map_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
return ret;
}
+int
+idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport, uint32_t *qids, uint16_t nb_rx_queues)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_queue_vector *qv_map;
+ struct idpf_hw *hw = &adapter->hw;
+ uint32_t dynctl_val, itrn_val;
+ uint32_t dynctl_reg_start;
+ uint32_t itrn_reg_start;
+ uint16_t i;
+ int ret;
+
+ qv_map = rte_zmalloc("qv_map",
+ nb_rx_queues *
+ sizeof(struct virtchnl2_queue_vector), 0);
+ if (qv_map == NULL) {
+ DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
+ nb_rx_queues);
+ ret = -ENOMEM;
+ goto qv_map_alloc_err;
+ }
+
+ /* Rx interrupt disabled, Map interrupt only for writeback */
+
+ /* The capability flags adapter->caps.other_caps should be
+ * compared with bit VIRTCHNL2_CAP_WB_ON_ITR here. The if
+ * condition should be updated when the FW can return the
+ * correct flag bits.
+ */
+ dynctl_reg_start =
+ vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
+ itrn_reg_start =
+ vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
+ dynctl_val = IDPF_READ_REG(hw, dynctl_reg_start);
+ DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x", dynctl_val);
+ itrn_val = IDPF_READ_REG(hw, itrn_reg_start);
+ DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
+ /* Force write-backs by setting WB_ON_ITR bit in DYN_CTL
+ * register. WB_ON_ITR and INTENA are mutually exclusive
+ * bits. Setting WB_ON_ITR bits means TX and RX Descs
+ * are written back based on ITR expiration irrespective
+ * of INTENA setting.
+ */
+ /* TBD: need to tune INTERVAL value for better performance. */
+ itrn_val = (itrn_val == 0) ? IDPF_DFLT_INTERVAL : itrn_val;
+ dynctl_val = VIRTCHNL2_ITR_IDX_0 <<
+ PF_GLINT_DYN_CTL_ITR_INDX_S |
+ PF_GLINT_DYN_CTL_WB_ON_ITR_M |
+ itrn_val << PF_GLINT_DYN_CTL_INTERVAL_S;
+ IDPF_WRITE_REG(hw, dynctl_reg_start, dynctl_val);
+
+ for (i = 0; i < nb_rx_queues; i++) {
+ /* map all queues to the same vector */
+ qv_map[i].queue_id = qids[i];
+ qv_map[i].vector_id =
+ vport->recv_vectors->vchunks.vchunks->start_vector_id;
+ }
+ vport->qv_map = qv_map;
+
+ ret = idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, true);
+ if (ret != 0) {
+ DRV_LOG(ERR, "config interrupt mapping failed");
+ goto config_irq_map_err;
+ }
+
+ return 0;
+
+config_irq_map_err:
+ rte_free(vport->qv_map);
+ vport->qv_map = NULL;
+
+qv_map_alloc_err:
+ return ret;
+}
+
int
idpf_vport_irq_unmap_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
{
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 112367dae8..f767ea7cec 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -200,5 +200,9 @@ int idpf_vport_info_init(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_info);
__rte_internal
void idpf_vport_stats_update(struct virtchnl2_vport_stats *oes, struct virtchnl2_vport_stats *nes);
+__rte_internal
+int idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport,
+ uint32_t *qids,
+ uint16_t nb_rx_queues);
#endif /* _IDPF_COMMON_DEVICE_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 25624732b0..0729f6b912 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -69,6 +69,7 @@ INTERNAL {
idpf_vport_info_init;
idpf_vport_init;
idpf_vport_irq_map_config;
+ idpf_vport_irq_map_config_by_qids;
idpf_vport_irq_unmap_config;
idpf_vport_rss_config;
idpf_vport_stats_update;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v8 11/14] net/cpfl: enable write back based on ITR expire
2023-06-05 6:17 ` [PATCH v8 00/14] net/cpfl: add hairpin queue support beilei.xing
` (9 preceding siblings ...)
2023-06-05 6:17 ` [PATCH v8 10/14] common/idpf: add irq map config API beilei.xing
@ 2023-06-05 6:17 ` beilei.xing
2023-06-05 6:17 ` [PATCH v8 12/14] net/cpfl: support peer ports get beilei.xing
` (3 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 6:17 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch enables write back on ITR expire
(WB_ON_ITR) for hairpin queues.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 2b99e58341..850f1c0bc6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -735,11 +735,22 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
static int
cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
{
+ uint32_t qids[CPFL_MAX_P2P_NB_QUEUES + IDPF_DEFAULT_RXQ_NUM] = {0};
struct cpfl_vport *cpfl_vport = dev->data->dev_private;
struct idpf_vport *vport = &cpfl_vport->base;
uint16_t nb_rx_queues = dev->data->nb_rx_queues;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i;
- return idpf_vport_irq_map_config(vport, nb_rx_queues);
+ for (i = 0; i < nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ qids[i] = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid,
+ (i - cpfl_vport->nb_data_rxq));
+ else
+ qids[i] = cpfl_hw_qid_get(vport->chunks_info.rx_start_qid, i);
+ }
+ return idpf_vport_irq_map_config_by_qids(vport, qids, nb_rx_queues);
}
/* Update hairpin_info for dev's tx hairpin queue */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v8 12/14] net/cpfl: support peer ports get
2023-06-05 6:17 ` [PATCH v8 00/14] net/cpfl: add hairpin queue support beilei.xing
` (10 preceding siblings ...)
2023-06-05 6:17 ` [PATCH v8 11/14] net/cpfl: enable write back based on ITR expire beilei.xing
@ 2023-06-05 6:17 ` beilei.xing
2023-06-05 11:22 ` Wu, Jingjing
2023-06-05 6:17 ` [PATCH v8 13/14] net/cpfl: support hairpin bind/unbind beilei.xing
` (2 subsequent siblings)
14 siblings, 1 reply; 164+ messages in thread
From: beilei.xing @ 2023-06-05 6:17 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports getting hairpin peer ports.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 41 ++++++++++++++++++++++++++++++++++
1 file changed, 41 insertions(+)
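From the application side this op is reached through the generic ethdev
API; a minimal sketch (the wrapper function below is illustrative only and
not part of this patch):

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: query which Rx port peers with tx_port's Tx hairpin queues. */
static void
example_get_hairpin_peers(uint16_t tx_port)
{
	uint16_t peers[RTE_MAX_ETHPORTS];
	int nb;

	/* direction = 1: report the peer ports of the Tx hairpin queues */
	nb = rte_eth_hairpin_get_peer_ports(tx_port, peers,
					    RTE_MAX_ETHPORTS, 1);
	if (nb > 0)
		printf("Tx hairpin queues peer with Rx port %u\n", peers[0]);
	else
		printf("no hairpin peers found: %d\n", nb);
}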
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 850f1c0bc6..1a1ca4bc77 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1080,6 +1080,46 @@ cpfl_dev_close(struct rte_eth_dev *dev)
return 0;
}
+static int
+cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
+ size_t len, uint32_t tx)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_tx_queue *txq;
+ struct idpf_rx_queue *rxq;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i;
+ int j = 0;
+
+ if (len <= 0)
+ return -EINVAL;
+
+ if (cpfl_vport->p2p_q_chunks_info == NULL)
+ return -ENOTSUP;
+
+ if (tx > 0) {
+ for (i = cpfl_vport->nb_data_txq, j = 0; i < dev->data->nb_tx_queues; i++, j++) {
+ txq = dev->data->tx_queues[i];
+ if (txq == NULL)
+ return -EINVAL;
+ cpfl_txq = (struct cpfl_tx_queue *)txq;
+ peer_ports[j] = cpfl_txq->hairpin_info.peer_rxp;
+ }
+ } else if (tx == 0) {
+ for (i = cpfl_vport->nb_data_rxq, j = 0; i < dev->data->nb_rx_queues; i++, j++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq == NULL)
+ return -EINVAL;
+ cpfl_rxq = (struct cpfl_rx_queue *)rxq;
+ peer_ports[j] = cpfl_rxq->hairpin_info.peer_txp;
+ }
+ }
+
+ return j;
+}
+
static const struct eth_dev_ops cpfl_eth_dev_ops = {
.dev_configure = cpfl_dev_configure,
.dev_close = cpfl_dev_close,
@@ -1109,6 +1149,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.hairpin_cap_get = cpfl_hairpin_cap_get,
.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
+ .hairpin_get_peer_ports = cpfl_hairpin_get_peer_ports,
};
static int
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v8 13/14] net/cpfl: support hairpin bind/unbind
2023-06-05 6:17 ` [PATCH v8 00/14] net/cpfl: add hairpin queue support beilei.xing
` (11 preceding siblings ...)
2023-06-05 6:17 ` [PATCH v8 12/14] net/cpfl: support peer ports get beilei.xing
@ 2023-06-05 6:17 ` beilei.xing
2023-06-05 6:17 ` [PATCH v8 14/14] doc: update the doc of CPFL PMD beilei.xing
2023-06-05 9:06 ` [PATCH v9 00/14] net/cpfl: add hairpin queue support beilei.xing
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 6:17 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports hairpin_bind/unbind ops.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 137 +++++++++++++++++++++++++++++++++
1 file changed, 137 insertions(+)
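These ops back the generic rte_eth_hairpin_bind()/rte_eth_hairpin_unbind()
API; a minimal application-side sketch (the wrapper function is illustrative
only, and both ports are assumed to already have manual-bind hairpin queues
configured):

#include <rte_ethdev.h>

/* Sketch: bind the Tx hairpin queues of tx_port to the Rx hairpin queues
 * of rx_port, then undo the binding. For single-port hairpin the two port
 * IDs are the same.
 */
static int
example_hairpin_bind_unbind(uint16_t tx_port, uint16_t rx_port)
{
	int ret;

	ret = rte_eth_hairpin_bind(tx_port, rx_port);
	if (ret != 0)
		return ret;

	return rte_eth_hairpin_unbind(tx_port, rx_port);
}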
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 1a1ca4bc77..0d127eae3e 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1120,6 +1120,141 @@ cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
return j;
}
+static int
+cpfl_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct idpf_vport *tx_vport = &cpfl_tx_vport->base;
+ struct cpfl_vport *cpfl_rx_vport;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct rte_eth_dev *peer_dev;
+ struct idpf_vport *rx_vport;
+ int err = 0;
+ int i;
+
+ err = cpfl_txq_hairpin_info_update(dev, rx_port);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info.");
+ return err;
+ }
+
+ /* configure hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ err = cpfl_hairpin_txq_config(tx_vport, cpfl_txq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+ return err;
+ }
+ }
+
+ err = cpfl_hairpin_tx_complq_config(cpfl_tx_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
+ return err;
+ }
+
+ peer_dev = &rte_eth_devices[rx_port];
+ cpfl_rx_vport = (struct cpfl_vport *)peer_dev->data->dev_private;
+ rx_vport = &cpfl_rx_vport->base;
+ cpfl_rxq_hairpin_mz_bind(peer_dev);
+
+ err = cpfl_hairpin_rx_bufq_config(cpfl_rx_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
+ return err;
+ }
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ err = cpfl_hairpin_rxq_config(rx_vport, cpfl_rxq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
+ return err;
+ }
+ err = cpfl_rx_queue_init(peer_dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+ return err;
+ }
+ }
+
+ /* enable hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+ i - cpfl_tx_vport->nb_data_txq,
+ false, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on",
+ i);
+ return err;
+ }
+ cpfl_txq->base.q_started = true;
+ }
+
+ err = cpfl_switch_hairpin_complq(cpfl_tx_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq");
+ return err;
+ }
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+ i - cpfl_rx_vport->nb_data_rxq,
+ true, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on",
+ i);
+ }
+ cpfl_rxq->base.q_started = true;
+ }
+
+ err = cpfl_switch_hairpin_bufq(cpfl_rx_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Rx buffer queue");
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+cpfl_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+ struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i;
+
+ /* disable hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+ i - cpfl_tx_vport->nb_data_txq,
+ false, false);
+ cpfl_txq->base.q_started = false;
+ }
+
+ cpfl_switch_hairpin_complq(cpfl_tx_vport, false);
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+ i - cpfl_rx_vport->nb_data_rxq,
+ true, false);
+ cpfl_rxq->base.q_started = false;
+ }
+
+ cpfl_switch_hairpin_bufq(cpfl_rx_vport, false);
+
+ return 0;
+}
+
static const struct eth_dev_ops cpfl_eth_dev_ops = {
.dev_configure = cpfl_dev_configure,
.dev_close = cpfl_dev_close,
@@ -1150,6 +1285,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
.hairpin_get_peer_ports = cpfl_hairpin_get_peer_ports,
+ .hairpin_bind = cpfl_hairpin_bind,
+ .hairpin_unbind = cpfl_hairpin_unbind,
};
static int
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v8 14/14] doc: update the doc of CPFL PMD
2023-06-05 6:17 ` [PATCH v8 00/14] net/cpfl: add hairpin queue support beilei.xing
` (12 preceding siblings ...)
2023-06-05 6:17 ` [PATCH v8 13/14] net/cpfl: support hairpin bind/unbind beilei.xing
@ 2023-06-05 6:17 ` beilei.xing
2023-06-05 9:06 ` [PATCH v9 00/14] net/cpfl: add hairpin queue support beilei.xing
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 6:17 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
Update cpfl.rst to clarify hairpin support.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
doc/guides/nics/cpfl.rst | 7 +++++++
1 file changed, 7 insertions(+)
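To complement the documentation, a minimal setup sketch using the generic
ethdev hairpin API (queue indexes, the descriptor count and the wrapper
function name are assumptions for illustration, not values required by the
PMD):

#include <rte_ethdev.h>

/* Sketch: one single-port hairpin Rx/Tx queue pair on port_id. Hairpin
 * queues are set up in addition to the regular data queues.
 */
static int
example_hairpin_queue_setup(uint16_t port_id, uint16_t rxq_id, uint16_t txq_id)
{
	struct rte_eth_hairpin_conf conf = { .peer_count = 1 };
	struct rte_eth_hairpin_cap cap;
	int ret;

	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
	if (ret != 0)
		return ret;	/* hairpin not supported by this port */

	/* The Rx hairpin queue peers with the Tx hairpin queue on the same port. */
	conf.peers[0].port = port_id;
	conf.peers[0].queue = txq_id;
	ret = rte_eth_rx_hairpin_queue_setup(port_id, rxq_id, 128, &conf);
	if (ret != 0)
		return ret;

	conf.peers[0].queue = rxq_id;
	return rte_eth_tx_hairpin_queue_setup(port_id, txq_id, 128, &conf);
}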
diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index d25db088eb..8d5c3082e4 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -106,3 +106,10 @@ The paths are chosen based on 2 conditions:
A value "P" means the offload feature is not supported by vector path.
If any not supported features are used, cpfl vector PMD is disabled
and the scalar paths are chosen.
+
+Hairpin queue
+~~~~~~~~~~~~~
+
+ The E2100 Series can loop back packets from an RX port to a TX port;
+ this feature is called port-to-port or hairpin.
+ Currently, the PMD only supports single-port hairpin.
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH v8 03/14] net/cpfl: add hairpin queue group during vport init
2023-06-05 6:17 ` [PATCH v8 03/14] net/cpfl: add hairpin queue group during vport init beilei.xing
@ 2023-06-05 8:35 ` Wu, Jingjing
2023-06-05 8:53 ` Xing, Beilei
0 siblings, 1 reply; 164+ messages in thread
From: Wu, Jingjing @ 2023-06-05 8:35 UTC (permalink / raw)
To: Xing, Beilei; +Cc: dev, Liu, Mingxia
> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Monday, June 5, 2023 2:17 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Subject: [PATCH v8 03/14] net/cpfl: add hairpin queue group during vport init
>
> From: Beilei Xing <beilei.xing@intel.com>
>
> This patch adds hairpin queue group during vport init.
>
> Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/net/cpfl/cpfl_ethdev.c | 133 +++++++++++++++++++++++++++++++++
> drivers/net/cpfl/cpfl_ethdev.h | 18 +++++
> drivers/net/cpfl/cpfl_rxtx.h | 7 ++
> 3 files changed, 158 insertions(+)
>
> diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
> index e587155db6..c1273a7478 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.c
> +++ b/drivers/net/cpfl/cpfl_ethdev.c
> @@ -840,6 +840,20 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
> return 0;
> }
>
> +static int
> +cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
> +{
> + struct virtchnl2_queue_group_id qg_ids[CPFL_P2P_NB_QUEUE_GRPS] = {0};
> + int ret = 0;
> +
> + qg_ids[0].queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
> + qg_ids[0].queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
> + ret = idpf_vc_queue_grps_del(vport, CPFL_P2P_NB_QUEUE_GRPS, qg_ids);
> + if (ret)
> + PMD_DRV_LOG(ERR, "Failed to delete p2p queue groups");
> + return ret;
> +}
> +
> static int
> cpfl_dev_close(struct rte_eth_dev *dev)
> {
> @@ -848,7 +862,12 @@ cpfl_dev_close(struct rte_eth_dev *dev)
> struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
>
> cpfl_dev_stop(dev);
> +
> + if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
> + cpfl_p2p_queue_grps_del(vport);
> +
> idpf_vport_deinit(vport);
> + rte_free(cpfl_vport->p2p_q_chunks_info);
>
> adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
> adapter->cur_vport_nb--;
> @@ -1284,6 +1303,96 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter)
> return vport_idx;
> }
>
> +static int
> +cpfl_p2p_q_grps_add(struct idpf_vport *vport,
> + struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
> + uint8_t *p2p_q_vc_out_info)
> +{
> + int ret;
> +
> + p2p_queue_grps_info->vport_id = vport->vport_id;
> + p2p_queue_grps_info->qg_info.num_queue_groups =
> CPFL_P2P_NB_QUEUE_GRPS;
> + p2p_queue_grps_info->qg_info.groups[0].num_rx_q =
> CPFL_MAX_P2P_NB_QUEUES;
> + p2p_queue_grps_info->qg_info.groups[0].num_rx_bufq =
> CPFL_P2P_NB_RX_BUFQ;
> + p2p_queue_grps_info->qg_info.groups[0].num_tx_q =
> CPFL_MAX_P2P_NB_QUEUES;
> + p2p_queue_grps_info->qg_info.groups[0].num_tx_complq =
> CPFL_P2P_NB_TX_COMPLQ;
> + p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_id =
> CPFL_P2P_QUEUE_GRP_ID;
> + p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_type =
> VIRTCHNL2_QUEUE_GROUP_P2P;
> + p2p_queue_grps_info->qg_info.groups[0].rx_q_grp_info.rss_lut_size = 0;
> + p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.tx_tc = 0;
> + p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.priority = 0;
> + p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.is_sp = 0;
> + p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.pir_weight = 0;
> +
> + ret = idpf_vc_queue_grps_add(vport, p2p_queue_grps_info,
> p2p_q_vc_out_info);
> + if (ret != 0) {
> + PMD_DRV_LOG(ERR, "Failed to add p2p queue groups.");
> + return ret;
> + }
> +
> + return ret;
> +}
> +
> +static int
> +cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
> + struct virtchnl2_add_queue_groups *p2p_q_vc_out_info)
> +{
> + struct p2p_queue_chunks_info *p2p_q_chunks_info = cpfl_vport-
> >p2p_q_chunks_info;
> + struct virtchnl2_queue_reg_chunks *vc_chunks_out;
> + int i, type;
> +
> + if (p2p_q_vc_out_info->qg_info.groups[0].qg_id.queue_group_type !=
> + VIRTCHNL2_QUEUE_GROUP_P2P) {
> + PMD_DRV_LOG(ERR, "Add queue group response mismatch.");
> + return -EINVAL;
> + }
> +
> + vc_chunks_out = &p2p_q_vc_out_info->qg_info.groups[0].chunks;
> +
> + for (i = 0; i < vc_chunks_out->num_chunks; i++) {
> + type = vc_chunks_out->chunks[i].type;
> + switch (type) {
> + case VIRTCHNL2_QUEUE_TYPE_TX:
> + p2p_q_chunks_info->tx_start_qid =
> + vc_chunks_out->chunks[i].start_queue_id;
> + p2p_q_chunks_info->tx_qtail_start =
> + vc_chunks_out->chunks[i].qtail_reg_start;
> + p2p_q_chunks_info->tx_qtail_spacing =
> + vc_chunks_out->chunks[i].qtail_reg_spacing;
> + break;
> + case VIRTCHNL2_QUEUE_TYPE_RX:
> + p2p_q_chunks_info->rx_start_qid =
> + vc_chunks_out->chunks[i].start_queue_id;
> + p2p_q_chunks_info->rx_qtail_start =
> + vc_chunks_out->chunks[i].qtail_reg_start;
> + p2p_q_chunks_info->rx_qtail_spacing =
> + vc_chunks_out->chunks[i].qtail_reg_spacing;
> + break;
> + case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
> + p2p_q_chunks_info->tx_compl_start_qid =
> + vc_chunks_out->chunks[i].start_queue_id;
> + p2p_q_chunks_info->tx_compl_qtail_start =
> + vc_chunks_out->chunks[i].qtail_reg_start;
> + p2p_q_chunks_info->tx_compl_qtail_spacing =
> + vc_chunks_out->chunks[i].qtail_reg_spacing;
> + break;
> + case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
> + p2p_q_chunks_info->rx_buf_start_qid =
> + vc_chunks_out->chunks[i].start_queue_id;
> + p2p_q_chunks_info->rx_buf_qtail_start =
> + vc_chunks_out->chunks[i].qtail_reg_start;
> + p2p_q_chunks_info->rx_buf_qtail_spacing =
> + vc_chunks_out->chunks[i].qtail_reg_spacing;
> + break;
> + default:
> + PMD_DRV_LOG(ERR, "Unsupported queue type");
> + break;
> + }
> + }
> +
> + return 0;
> +}
> +
> static int
> cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
> {
> @@ -1293,6 +1402,8 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void
> *init_params)
> struct cpfl_adapter_ext *adapter = param->adapter;
> /* for sending create vport virtchnl msg prepare */
> struct virtchnl2_create_vport create_vport_info;
> + struct virtchnl2_add_queue_groups p2p_queue_grps_info;
> + uint8_t p2p_q_vc_out_info[IDPF_DFLT_MBX_BUF_SIZE] = {0};
> int ret = 0;
>
> dev->dev_ops = &cpfl_eth_dev_ops;
> @@ -1327,6 +1438,28 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void
> *init_params)
> rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
> &dev->data->mac_addrs[0]);
>
> + if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
> + memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
> + ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info,
> p2p_q_vc_out_info);
> + if (ret != 0) {
> + PMD_INIT_LOG(ERR, "Failed to add p2p queue group.");
> + return 0;
> + }
> + cpfl_vport->p2p_q_chunks_info = rte_zmalloc(NULL,
> + sizeof(struct p2p_queue_chunks_info),
> 0);
> + if (cpfl_vport->p2p_q_chunks_info == NULL) {
> + PMD_INIT_LOG(ERR, "Failed to allocate p2p queue info.");
> + cpfl_p2p_queue_grps_del(vport);
> + return 0;
> + }
> + ret = cpfl_p2p_queue_info_init(cpfl_vport,
> + (struct virtchnl2_add_queue_groups
> *)p2p_q_vc_out_info);
> + if (ret != 0) {
> + PMD_INIT_LOG(ERR, "Failed to init p2p queue info.");
> + cpfl_p2p_queue_grps_del(vport);
Forgot to free p2p_q_chunks_info?
And it would be better to use WARNING, as the error is not returned as a negative value here.
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH v8 03/14] net/cpfl: add hairpin queue group during vport init
2023-06-05 8:35 ` Wu, Jingjing
@ 2023-06-05 8:53 ` Xing, Beilei
0 siblings, 0 replies; 164+ messages in thread
From: Xing, Beilei @ 2023-06-05 8:53 UTC (permalink / raw)
To: Wu, Jingjing; +Cc: dev, Liu, Mingxia
> -----Original Message-----
> From: Wu, Jingjing <jingjing.wu@intel.com>
> Sent: Monday, June 5, 2023 4:36 PM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>
> Subject: RE: [PATCH v8 03/14] net/cpfl: add hairpin queue group during vport
> init
>
>
>
> > -----Original Message-----
> > From: Xing, Beilei <beilei.xing@intel.com>
> > Sent: Monday, June 5, 2023 2:17 PM
> > To: Wu, Jingjing <jingjing.wu@intel.com>
> > Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> > <beilei.xing@intel.com>
> > Subject: [PATCH v8 03/14] net/cpfl: add haipin queue group during
> > vport init
> >
> > From: Beilei Xing <beilei.xing@intel.com>
> >
> > This patch adds hairpin queue group during vport init.
> >
> > Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
> > Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> > ---
> > drivers/net/cpfl/cpfl_ethdev.c | 133 +++++++++++++++++++++++++++++++++
> > drivers/net/cpfl/cpfl_ethdev.h | 18 +++++
> > drivers/net/cpfl/cpfl_rxtx.h | 7 ++
> > 3 files changed, 158 insertions(+)
> >
> > diff --git a/drivers/net/cpfl/cpfl_ethdev.c
> > b/drivers/net/cpfl/cpfl_ethdev.c index e587155db6..c1273a7478 100644
> > --- a/drivers/net/cpfl/cpfl_ethdev.c
> > +++ b/drivers/net/cpfl/cpfl_ethdev.c
> > @@ -840,6 +840,20 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
> > return 0;
> > }
> >
> > +static int
> > +cpfl_p2p_queue_grps_del(struct idpf_vport *vport) {
> > + struct virtchnl2_queue_group_id qg_ids[CPFL_P2P_NB_QUEUE_GRPS]
> = {0};
> > + int ret = 0;
> > +
> > + qg_ids[0].queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
> > + qg_ids[0].queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
> > + ret = idpf_vc_queue_grps_del(vport, CPFL_P2P_NB_QUEUE_GRPS,
> qg_ids);
> > + if (ret)
> > + PMD_DRV_LOG(ERR, "Failed to delete p2p queue groups");
> > + return ret;
> > +}
> > +
> > static int
> > cpfl_dev_close(struct rte_eth_dev *dev) { @@ -848,7 +862,12 @@
> > cpfl_dev_close(struct rte_eth_dev *dev)
> > struct cpfl_adapter_ext *adapter =
> > CPFL_ADAPTER_TO_EXT(vport->adapter);
> >
> > cpfl_dev_stop(dev);
> > +
> > + if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
> > + cpfl_p2p_queue_grps_del(vport);
> > +
> > idpf_vport_deinit(vport);
> > + rte_free(cpfl_vport->p2p_q_chunks_info);
> >
> > adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
> > adapter->cur_vport_nb--;
> > @@ -1284,6 +1303,96 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext
> *adapter)
> > return vport_idx;
> > }
> >
> > +static int
> > +cpfl_p2p_q_grps_add(struct idpf_vport *vport,
> > + struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
> > + uint8_t *p2p_q_vc_out_info)
> > +{
> > + int ret;
> > +
> > + p2p_queue_grps_info->vport_id = vport->vport_id;
> > + p2p_queue_grps_info->qg_info.num_queue_groups =
> > CPFL_P2P_NB_QUEUE_GRPS;
> > + p2p_queue_grps_info->qg_info.groups[0].num_rx_q =
> > CPFL_MAX_P2P_NB_QUEUES;
> > + p2p_queue_grps_info->qg_info.groups[0].num_rx_bufq =
> > CPFL_P2P_NB_RX_BUFQ;
> > + p2p_queue_grps_info->qg_info.groups[0].num_tx_q =
> > CPFL_MAX_P2P_NB_QUEUES;
> > + p2p_queue_grps_info->qg_info.groups[0].num_tx_complq =
> > CPFL_P2P_NB_TX_COMPLQ;
> > + p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_id =
> > CPFL_P2P_QUEUE_GRP_ID;
> > + p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_type =
> > VIRTCHNL2_QUEUE_GROUP_P2P;
> > + p2p_queue_grps_info->qg_info.groups[0].rx_q_grp_info.rss_lut_size =
> 0;
> > + p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.tx_tc = 0;
> > + p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.priority = 0;
> > + p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.is_sp = 0;
> > + p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.pir_weight =
> 0;
> > +
> > + ret = idpf_vc_queue_grps_add(vport, p2p_queue_grps_info,
> > p2p_q_vc_out_info);
> > + if (ret != 0) {
> > + PMD_DRV_LOG(ERR, "Failed to add p2p queue groups.");
> > + return ret;
> > + }
> > +
> > + return ret;
> > +}
> > +
> > +static int
> > +cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
> > + struct virtchnl2_add_queue_groups
> *p2p_q_vc_out_info) {
> > + struct p2p_queue_chunks_info *p2p_q_chunks_info = cpfl_vport-
> > >p2p_q_chunks_info;
> > + struct virtchnl2_queue_reg_chunks *vc_chunks_out;
> > + int i, type;
> > +
> > + if (p2p_q_vc_out_info->qg_info.groups[0].qg_id.queue_group_type !=
> > + VIRTCHNL2_QUEUE_GROUP_P2P) {
> > + PMD_DRV_LOG(ERR, "Add queue group response mismatch.");
> > + return -EINVAL;
> > + }
> > +
> > + vc_chunks_out = &p2p_q_vc_out_info->qg_info.groups[0].chunks;
> > +
> > + for (i = 0; i < vc_chunks_out->num_chunks; i++) {
> > + type = vc_chunks_out->chunks[i].type;
> > + switch (type) {
> > + case VIRTCHNL2_QUEUE_TYPE_TX:
> > + p2p_q_chunks_info->tx_start_qid =
> > + vc_chunks_out->chunks[i].start_queue_id;
> > + p2p_q_chunks_info->tx_qtail_start =
> > + vc_chunks_out->chunks[i].qtail_reg_start;
> > + p2p_q_chunks_info->tx_qtail_spacing =
> > + vc_chunks_out->chunks[i].qtail_reg_spacing;
> > + break;
> > + case VIRTCHNL2_QUEUE_TYPE_RX:
> > + p2p_q_chunks_info->rx_start_qid =
> > + vc_chunks_out->chunks[i].start_queue_id;
> > + p2p_q_chunks_info->rx_qtail_start =
> > + vc_chunks_out->chunks[i].qtail_reg_start;
> > + p2p_q_chunks_info->rx_qtail_spacing =
> > + vc_chunks_out->chunks[i].qtail_reg_spacing;
> > + break;
> > + case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
> > + p2p_q_chunks_info->tx_compl_start_qid =
> > + vc_chunks_out->chunks[i].start_queue_id;
> > + p2p_q_chunks_info->tx_compl_qtail_start =
> > + vc_chunks_out->chunks[i].qtail_reg_start;
> > + p2p_q_chunks_info->tx_compl_qtail_spacing =
> > + vc_chunks_out->chunks[i].qtail_reg_spacing;
> > + break;
> > + case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
> > + p2p_q_chunks_info->rx_buf_start_qid =
> > + vc_chunks_out->chunks[i].start_queue_id;
> > + p2p_q_chunks_info->rx_buf_qtail_start =
> > + vc_chunks_out->chunks[i].qtail_reg_start;
> > + p2p_q_chunks_info->rx_buf_qtail_spacing =
> > + vc_chunks_out->chunks[i].qtail_reg_spacing;
> > + break;
> > + default:
> > + PMD_DRV_LOG(ERR, "Unsupported queue type");
> > + break;
> > + }
> > + }
> > +
> > + return 0;
> > +}
> > +
> > static int
> > cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params) { @@
> > -1293,6 +1402,8 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void
> > *init_params)
> > struct cpfl_adapter_ext *adapter = param->adapter;
> > /* for sending create vport virtchnl msg prepare */
> > struct virtchnl2_create_vport create_vport_info;
> > + struct virtchnl2_add_queue_groups p2p_queue_grps_info;
> > + uint8_t p2p_q_vc_out_info[IDPF_DFLT_MBX_BUF_SIZE] = {0};
> > int ret = 0;
> >
> > dev->dev_ops = &cpfl_eth_dev_ops;
> > @@ -1327,6 +1438,28 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev,
> > void
> > *init_params)
> > rte_ether_addr_copy((struct rte_ether_addr *)vport-
> >default_mac_addr,
> > &dev->data->mac_addrs[0]);
> >
> > + if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
> > + memset(&p2p_queue_grps_info, 0,
> sizeof(p2p_queue_grps_info));
> > + ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info,
> > p2p_q_vc_out_info);
> > + if (ret != 0) {
> > + PMD_INIT_LOG(ERR, "Failed to add p2p queue
> group.");
> > + return 0;
> > + }
> > + cpfl_vport->p2p_q_chunks_info = rte_zmalloc(NULL,
> > + sizeof(struct
> p2p_queue_chunks_info),
> > 0);
> > + if (cpfl_vport->p2p_q_chunks_info == NULL) {
> > + PMD_INIT_LOG(ERR, "Failed to allocate p2p queue
> info.");
> > + cpfl_p2p_queue_grps_del(vport);
> > + return 0;
> > + }
> > + ret = cpfl_p2p_queue_info_init(cpfl_vport,
> > + (struct virtchnl2_add_queue_groups
> > *)p2p_q_vc_out_info);
> > + if (ret != 0) {
> > + PMD_INIT_LOG(ERR, "Failed to init p2p queue info.");
> > + cpfl_p2p_queue_grps_del(vport);
>
> Forgot to free p2p_q_chunks_info?
> And it would be better to use WARNING, as the error is not returned as a negative value here.
Yes, need to free p2p_q_chunks_info. Will fix in next version.
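A rough sketch of the corrected error path being discussed, for clarity
(an illustration of the agreed fix, not the actual v9 diff):

	ret = cpfl_p2p_queue_info_init(cpfl_vport,
			(struct virtchnl2_add_queue_groups *)p2p_q_vc_out_info);
	if (ret != 0) {
		PMD_INIT_LOG(WARNING, "Failed to init p2p queue info.");
		rte_free(cpfl_vport->p2p_q_chunks_info);
		cpfl_vport->p2p_q_chunks_info = NULL;
		cpfl_p2p_queue_grps_del(vport);
	}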
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v9 00/14] net/cpfl: add hairpin queue support
2023-06-05 6:17 ` [PATCH v8 00/14] net/cpfl: add hairpin queue support beilei.xing
` (13 preceding siblings ...)
2023-06-05 6:17 ` [PATCH v8 14/14] doc: update the doc of CPFL PMD beilei.xing
@ 2023-06-05 9:06 ` beilei.xing
2023-06-05 9:06 ` [PATCH v9 01/14] net/cpfl: refine structures beilei.xing
` (14 more replies)
14 siblings, 15 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 9:06 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patchset adds hairpin queue support.
v2 changes:
- Change hairpin Rx queue configuration sequence.
- Refine code.
v3 changes:
- Refine the patchset based on the latest code.
v4 change:
- Remove hairpin rx buffer queue's sw_ring.
- Change hairpin Rx queue configuration sequence in cpfl_hairpin_bind function.
- Refine hairpin queue setup and release.
v5 change:
- Fix memory leak during queue setup.
- Refine hairpin Rxq/Txq start/stop.
v6 change:
- Add sign-off.
v7 change:
- Update cpfl.rst
v8 change:
- Fix Intel-compilation failure.
v9 change:
- Fix memory leak if fail to init queue group.
- Change log level.
Beilei Xing (14):
net/cpfl: refine structures
common/idpf: support queue groups add/delete
net/cpfl: add hairpin queue group during vport init
net/cpfl: support hairpin queue capability get
net/cpfl: support hairpin queue setup and release
common/idpf: add queue config API
net/cpfl: support hairpin queue configuration
common/idpf: add switch queue API
net/cpfl: support hairpin queue start/stop
common/idpf: add irq map config API
net/cpfl: enable write back based on ITR expire
net/cpfl: support peer ports get
net/cpfl: support hairpin bind/unbind
doc: update the doc of CPFL PMD
doc/guides/nics/cpfl.rst | 7 +
drivers/common/idpf/idpf_common_device.c | 75 ++
drivers/common/idpf/idpf_common_device.h | 4 +
drivers/common/idpf/idpf_common_virtchnl.c | 138 +++-
drivers/common/idpf/idpf_common_virtchnl.h | 18 +
drivers/common/idpf/version.map | 6 +
drivers/net/cpfl/cpfl_ethdev.c | 612 ++++++++++++++--
drivers/net/cpfl/cpfl_ethdev.h | 35 +-
drivers/net/cpfl/cpfl_rxtx.c | 789 +++++++++++++++++++--
drivers/net/cpfl/cpfl_rxtx.h | 76 ++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 21 +-
11 files changed, 1662 insertions(+), 119 deletions(-)
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v9 01/14] net/cpfl: refine structures
2023-06-05 9:06 ` [PATCH v9 00/14] net/cpfl: add hairpin queue support beilei.xing
@ 2023-06-05 9:06 ` beilei.xing
2023-06-05 9:06 ` [PATCH v9 02/14] common/idpf: support queue groups add/delete beilei.xing
` (13 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 9:06 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch refines some structures to support hairpin queue,
cpfl_rx_queue/cpfl_tx_queue/cpfl_vport.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 85 +++++++-----
drivers/net/cpfl/cpfl_ethdev.h | 6 +-
drivers/net/cpfl/cpfl_rxtx.c | 175 +++++++++++++++++-------
drivers/net/cpfl/cpfl_rxtx.h | 8 ++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 17 +--
5 files changed, 196 insertions(+), 95 deletions(-)
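The core of this refactor is a wrapper pattern: each cpfl structure embeds
the corresponding idpf structure as its first member, so dev->data->dev_private
can be viewed as either type. A minimal sketch of the idea (the helper name
below is illustrative only):

/* cpfl wrapper embedding the common idpf state as its first member. */
struct cpfl_vport {
	struct idpf_vport base;	/* must stay first; hairpin fields are added later */
};

static inline struct idpf_vport *
cpfl_vport_to_base(struct cpfl_vport *cpfl_vport)
{
	return &cpfl_vport->base;	/* same address, different view */
}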
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 7528a14d05..e587155db6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -124,7 +124,8 @@ static int
cpfl_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct rte_eth_link new_link;
unsigned int i;
@@ -156,7 +157,8 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
static int
cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
dev_info->max_rx_queues = base->caps.max_rx_q;
@@ -216,7 +218,8 @@ cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
static int
cpfl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
/* mtu setting is forbidden if port is start */
if (dev->data->dev_started) {
@@ -256,12 +259,12 @@ static uint64_t
cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
{
uint64_t mbuf_alloc_failed = 0;
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i = 0;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- mbuf_alloc_failed += __atomic_load_n(&rxq->rx_stats.mbuf_alloc_failed,
+ cpfl_rxq = dev->data->rx_queues[i];
+ mbuf_alloc_failed += __atomic_load_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,
__ATOMIC_RELAXED);
}
@@ -271,8 +274,8 @@ cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
static int
cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
int ret;
@@ -305,20 +308,20 @@ cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
static void
cpfl_reset_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- __atomic_store_n(&rxq->rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
+ cpfl_rxq = dev->data->rx_queues[i];
+ __atomic_store_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
}
}
static int
cpfl_dev_stats_reset(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
int ret;
@@ -343,8 +346,8 @@ static int cpfl_dev_xstats_reset(struct rte_eth_dev *dev)
static int cpfl_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned int n)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
unsigned int i;
int ret;
@@ -459,7 +462,8 @@ cpfl_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t idx, shift;
int ret = 0;
@@ -498,7 +502,8 @@ cpfl_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t idx, shift;
int ret = 0;
@@ -536,7 +541,8 @@ static int
cpfl_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
int ret = 0;
@@ -601,7 +607,8 @@ static int
cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
int ret = 0;
@@ -638,7 +645,8 @@ cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,
static int
cpfl_dev_configure(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct rte_eth_conf *conf = &dev->data->dev_conf;
struct idpf_adapter *base = vport->adapter;
int ret;
@@ -710,7 +718,8 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
static int
cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
uint16_t nb_rx_queues = dev->data->nb_rx_queues;
return idpf_vport_irq_map_config(vport, nb_rx_queues);
@@ -719,14 +728,14 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
static int
cpfl_start_queues(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
- struct idpf_tx_queue *txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
int err = 0;
int i;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL || txq->tx_deferred_start)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
continue;
err = cpfl_tx_queue_start(dev, i);
if (err != 0) {
@@ -736,8 +745,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- if (rxq == NULL || rxq->rx_deferred_start)
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
continue;
err = cpfl_rx_queue_start(dev, i);
if (err != 0) {
@@ -752,7 +761,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)
static int
cpfl_dev_start(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(base);
uint16_t num_allocated_vectors = base->caps.num_allocated_vectors;
@@ -813,7 +823,8 @@ cpfl_dev_start(struct rte_eth_dev *dev)
static int
cpfl_dev_stop(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
if (dev->data->dev_started == 0)
return 0;
@@ -832,7 +843,8 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
static int
cpfl_dev_close(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
@@ -842,7 +854,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
adapter->cur_vport_nb--;
dev->data->dev_private = NULL;
adapter->vports[vport->sw_idx] = NULL;
- rte_free(vport);
+ rte_free(cpfl_vport);
return 0;
}
@@ -1047,7 +1059,7 @@ cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id)
int i;
for (i = 0; i < adapter->cur_vport_nb; i++) {
- vport = adapter->vports[i];
+ vport = &adapter->vports[i]->base;
if (vport->vport_id != vport_id)
continue;
else
@@ -1275,7 +1287,8 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter)
static int
cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_vport_param *param = init_params;
struct cpfl_adapter_ext *adapter = param->adapter;
/* for sending create vport virtchnl msg prepare */
@@ -1300,7 +1313,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
goto err;
}
- adapter->vports[param->idx] = vport;
+ adapter->vports[param->idx] = cpfl_vport;
adapter->cur_vports |= RTE_BIT32(param->devarg_id);
adapter->cur_vport_nb++;
@@ -1415,7 +1428,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
snprintf(name, sizeof(name), "cpfl_%s_vport_0",
pci_dev->device.name);
retval = rte_eth_dev_create(&pci_dev->device, name,
- sizeof(struct idpf_vport),
+ sizeof(struct cpfl_vport),
NULL, NULL, cpfl_dev_vport_init,
&vport_param);
if (retval != 0)
@@ -1433,7 +1446,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
pci_dev->device.name,
devargs.req_vports[i]);
retval = rte_eth_dev_create(&pci_dev->device, name,
- sizeof(struct idpf_vport),
+ sizeof(struct cpfl_vport),
NULL, NULL, cpfl_dev_vport_init,
&vport_param);
if (retval != 0)
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 200dfcac02..81fe9ac4c3 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -69,13 +69,17 @@ struct cpfl_devargs {
uint16_t req_vport_nb;
};
+struct cpfl_vport {
+ struct idpf_vport base;
+};
+
struct cpfl_adapter_ext {
TAILQ_ENTRY(cpfl_adapter_ext) next;
struct idpf_adapter base;
char name[CPFL_ADAPTER_NAME_LEN];
- struct idpf_vport **vports;
+ struct cpfl_vport **vports;
uint16_t max_vport_nb;
uint16_t cur_vports; /* bit mask of created vport */
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 75021c3c54..04a51b8d15 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -128,7 +128,8 @@ cpfl_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,
uint16_t nb_desc, unsigned int socket_id,
struct rte_mempool *mp, uint8_t bufq_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct idpf_hw *hw = &base->hw;
const struct rte_memzone *mz;
@@ -220,15 +221,69 @@ cpfl_rx_split_bufq_release(struct idpf_rx_queue *bufq)
rte_free(bufq);
}
+static void
+cpfl_rx_queue_release(void *rxq)
+{
+ struct cpfl_rx_queue *cpfl_rxq = rxq;
+ struct idpf_rx_queue *q = NULL;
+
+ if (cpfl_rxq == NULL)
+ return;
+
+ q = &cpfl_rxq->base;
+
+ /* Split queue */
+ if (!q->adapter->is_rx_singleq) {
+ if (q->bufq2)
+ cpfl_rx_split_bufq_release(q->bufq2);
+
+ if (q->bufq1)
+ cpfl_rx_split_bufq_release(q->bufq1);
+
+ rte_free(cpfl_rxq);
+ return;
+ }
+
+ /* Single queue */
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(cpfl_rxq);
+}
+
+static void
+cpfl_tx_queue_release(void *txq)
+{
+ struct cpfl_tx_queue *cpfl_txq = txq;
+ struct idpf_tx_queue *q = NULL;
+
+ if (cpfl_txq == NULL)
+ return;
+
+ q = &cpfl_txq->base;
+
+ if (q->complq) {
+ rte_memzone_free(q->complq->mz);
+ rte_free(q->complq);
+ }
+
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(cpfl_txq);
+}
+
int
cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct idpf_hw *hw = &base->hw;
+ struct cpfl_rx_queue *cpfl_rxq;
const struct rte_memzone *mz;
struct idpf_rx_queue *rxq;
uint16_t rx_free_thresh;
@@ -248,21 +303,23 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Free memory if needed */
if (dev->data->rx_queues[queue_idx] != NULL) {
- idpf_qc_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
dev->data->rx_queues[queue_idx] = NULL;
}
/* Setup Rx queue */
- rxq = rte_zmalloc_socket("cpfl rxq",
- sizeof(struct idpf_rx_queue),
+ cpfl_rxq = rte_zmalloc_socket("cpfl rxq",
+ sizeof(struct cpfl_rx_queue),
RTE_CACHE_LINE_SIZE,
socket_id);
- if (rxq == NULL) {
+ if (cpfl_rxq == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
ret = -ENOMEM;
goto err_rxq_alloc;
}
+ rxq = &cpfl_rxq->base;
+
is_splitq = !!(vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
rxq->mp = mp;
@@ -329,7 +386,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
rxq->q_set = true;
- dev->data->rx_queues[queue_idx] = rxq;
+ dev->data->rx_queues[queue_idx] = cpfl_rxq;
return 0;
@@ -349,7 +406,8 @@ cpfl_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq,
uint16_t queue_idx, uint16_t nb_desc,
unsigned int socket_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
const struct rte_memzone *mz;
struct idpf_tx_queue *cq;
int ret;
@@ -397,9 +455,11 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t tx_rs_thresh, tx_free_thresh;
+ struct cpfl_tx_queue *cpfl_txq;
struct idpf_hw *hw = &base->hw;
const struct rte_memzone *mz;
struct idpf_tx_queue *txq;
@@ -419,21 +479,23 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Free memory if needed. */
if (dev->data->tx_queues[queue_idx] != NULL) {
- idpf_qc_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
dev->data->tx_queues[queue_idx] = NULL;
}
/* Allocate the TX queue data structure. */
- txq = rte_zmalloc_socket("cpfl txq",
- sizeof(struct idpf_tx_queue),
+ cpfl_txq = rte_zmalloc_socket("cpfl txq",
+ sizeof(struct cpfl_tx_queue),
RTE_CACHE_LINE_SIZE,
socket_id);
- if (txq == NULL) {
+ if (cpfl_txq == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
ret = -ENOMEM;
goto err_txq_alloc;
}
+ txq = &cpfl_txq->base;
+
is_splitq = !!(vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
txq->nb_tx_desc = nb_desc;
@@ -487,7 +549,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
queue_idx * vport->chunks_info.tx_qtail_spacing);
txq->ops = &def_txq_ops;
txq->q_set = true;
- dev->data->tx_queues[queue_idx] = txq;
+ dev->data->tx_queues[queue_idx] = cpfl_txq;
return 0;
@@ -503,6 +565,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
+ struct cpfl_rx_queue *cpfl_rxq;
struct idpf_rx_queue *rxq;
uint16_t max_pkt_len;
uint32_t frame_size;
@@ -511,7 +574,8 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
if (rx_queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
- rxq = dev->data->rx_queues[rx_queue_id];
+ cpfl_rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = &cpfl_rxq->base;
if (rxq == NULL || !rxq->q_set) {
PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
@@ -575,9 +639,10 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_rx_queue *rxq =
- dev->data->rx_queues[rx_queue_id];
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq = dev->data->rx_queues[rx_queue_id];
+ struct idpf_rx_queue *rxq = &cpfl_rxq->base;
int err = 0;
err = idpf_vc_rxq_config(vport, rxq);
@@ -610,15 +675,15 @@ cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
if (tx_queue_id >= dev->data->nb_tx_queues)
return -EINVAL;
- txq = dev->data->tx_queues[tx_queue_id];
+ cpfl_txq = dev->data->tx_queues[tx_queue_id];
/* Init the RX tail register. */
- IDPF_PCI_REG_WRITE(txq->qtx_tail, 0);
+ IDPF_PCI_REG_WRITE(cpfl_txq->base.qtx_tail, 0);
return 0;
}
@@ -626,12 +691,13 @@ cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
int
cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_tx_queue *txq =
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_tx_queue *cpfl_txq =
dev->data->tx_queues[tx_queue_id];
int err = 0;
- err = idpf_vc_txq_config(vport, txq);
+ err = idpf_vc_txq_config(vport, &cpfl_txq->base);
if (err != 0) {
PMD_DRV_LOG(ERR, "Fail to configure Tx queue %u", tx_queue_id);
return err;
@@ -650,7 +716,7 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
tx_queue_id);
} else {
- txq->q_started = true;
+ cpfl_txq->base.q_started = true;
dev->data->tx_queue_state[tx_queue_id] =
RTE_ETH_QUEUE_STATE_STARTED;
}
@@ -661,13 +727,16 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
int
cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq;
struct idpf_rx_queue *rxq;
int err;
if (rx_queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
+ cpfl_rxq = dev->data->rx_queues[rx_queue_id];
err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
@@ -675,7 +744,7 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return err;
}
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = &cpfl_rxq->base;
rxq->q_started = false;
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
rxq->ops->release_mbufs(rxq);
@@ -693,13 +762,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_tx_queue *cpfl_txq;
struct idpf_tx_queue *txq;
int err;
if (tx_queue_id >= dev->data->nb_tx_queues)
return -EINVAL;
+ cpfl_txq = dev->data->tx_queues[tx_queue_id];
+
err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
@@ -707,7 +780,7 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
return err;
}
- txq = dev->data->tx_queues[tx_queue_id];
+ txq = &cpfl_txq->base;
txq->q_started = false;
txq->ops->release_mbufs(txq);
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
@@ -724,25 +797,25 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
void
cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- idpf_qc_rx_queue_release(dev->data->rx_queues[qid]);
+ cpfl_rx_queue_release(dev->data->rx_queues[qid]);
}
void
cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- idpf_qc_tx_queue_release(dev->data->tx_queues[qid]);
+ cpfl_tx_queue_release(dev->data->tx_queues[qid]);
}
void
cpfl_stop_queues(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
- struct idpf_tx_queue *txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
int i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- if (rxq == NULL)
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq == NULL)
continue;
if (cpfl_rx_queue_stop(dev, i) != 0)
@@ -750,8 +823,8 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL)
continue;
if (cpfl_tx_queue_stop(dev, i) != 0)
@@ -762,9 +835,10 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
void
cpfl_set_rx_function(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
#ifdef RTE_ARCH_X86
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i;
if (cpfl_rx_vec_dev_check_default(dev) == CPFL_VECTOR_PATH &&
@@ -790,8 +864,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- (void)idpf_qc_splitq_rx_vec_setup(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ (void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
if (vport->rx_use_avx512) {
@@ -810,8 +884,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
} else {
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- (void)idpf_qc_singleq_rx_vec_setup(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ (void)idpf_qc_singleq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
if (vport->rx_use_avx512) {
@@ -860,10 +934,11 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
void
cpfl_set_tx_function(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
#ifdef RTE_ARCH_X86
#ifdef CC_AVX512_SUPPORT
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
int i;
#endif /* CC_AVX512_SUPPORT */
@@ -878,8 +953,8 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
vport->tx_use_avx512 = true;
if (vport->tx_use_avx512) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- idpf_qc_tx_vec_avx512_setup(txq);
+ cpfl_txq = dev->data->tx_queues[i];
+ idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
}
}
}
@@ -916,10 +991,10 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
#ifdef CC_AVX512_SUPPORT
if (vport->tx_use_avx512) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL)
continue;
- idpf_qc_tx_vec_avx512_setup(txq);
+ idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
}
PMD_DRV_LOG(NOTICE,
"Using Single AVX512 Vector Tx (port %d).",
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index fb267d38c8..bfb9ad97bd 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -23,6 +23,14 @@
#define CPFL_SUPPORT_CHAIN_NUM 5
+struct cpfl_rx_queue {
+ struct idpf_rx_queue base;
+};
+
+struct cpfl_tx_queue {
+ struct idpf_tx_queue base;
+};
+
int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
index 665418d27d..5690b17911 100644
--- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
+++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
@@ -76,15 +76,16 @@ cpfl_rx_splitq_vec_default(struct idpf_rx_queue *rxq)
static inline int
cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_rx_queue *rxq;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq;
int i, default_ret, splitq_ret, ret = CPFL_SCALAR_PATH;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- default_ret = cpfl_rx_vec_queue_default(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
- splitq_ret = cpfl_rx_splitq_vec_default(rxq);
+ splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base);
ret = splitq_ret && default_ret;
} else {
ret = default_ret;
@@ -100,12 +101,12 @@ static inline int
cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)
{
int i;
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
int ret = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- ret = cpfl_tx_vec_queue_default(txq);
+ cpfl_txq = dev->data->tx_queues[i];
+ ret = cpfl_tx_vec_queue_default(&cpfl_txq->base);
if (ret == CPFL_SCALAR_PATH)
return CPFL_SCALAR_PATH;
}
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v9 02/14] common/idpf: support queue groups add/delete
2023-06-05 9:06 ` [PATCH v9 00/14] net/cpfl: add hairpin queue support beilei.xing
2023-06-05 9:06 ` [PATCH v9 01/14] net/cpfl: refine structures beilei.xing
@ 2023-06-05 9:06 ` beilei.xing
2023-06-05 9:06 ` [PATCH v9 03/14] net/cpfl: add hairpin queue group during vport init beilei.xing
` (12 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 9:06 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds queue group add/delete virtual channel support.
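As a usage illustration (not part of this patch), a caller could remove a single,
previously added queue group with the new API roughly as below. The group id and the
P2P group type are assumptions taken from how the cpfl driver uses this API later in
the series:

/* Sketch only: delete one queue group via the new virtchnl helper. */
static int
example_del_one_queue_group(struct idpf_vport *vport)
{
	struct virtchnl2_queue_group_id qg_ids[1] = {0};

	qg_ids[0].queue_group_id = 1;	/* assumed group id */
	qg_ids[0].queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;	/* cpfl-defined type */

	/* Issues VIRTCHNL2_OP_DEL_QUEUE_GROUPS over the mailbox. */
	return idpf_vc_queue_grps_del(vport, 1, qg_ids);
}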
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 66 ++++++++++++++++++++++
drivers/common/idpf/idpf_common_virtchnl.h | 9 +++
drivers/common/idpf/version.map | 2 +
3 files changed, 77 insertions(+)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index b713678634..a3fe55c897 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -359,6 +359,72 @@ idpf_vc_vport_destroy(struct idpf_vport *vport)
return err;
}
+int
+idpf_vc_queue_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
+ uint8_t *p2p_queue_grps_out)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_cmd_info args;
+ int size, qg_info_size;
+ int err = -1;
+
+ size = sizeof(*p2p_queue_grps_info) +
+ (p2p_queue_grps_info->qg_info.num_queue_groups - 1) *
+ sizeof(struct virtchnl2_queue_group_info);
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_ADD_QUEUE_GROUPS;
+ args.in_args = (uint8_t *)p2p_queue_grps_info;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ if (err != 0) {
+ DRV_LOG(ERR,
+ "Failed to execute command of VIRTCHNL2_OP_ADD_QUEUE_GROUPS");
+ return err;
+ }
+
+ rte_memcpy(p2p_queue_grps_out, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
+ return 0;
+}
+
+int idpf_vc_queue_grps_del(struct idpf_vport *vport,
+ uint16_t num_q_grps,
+ struct virtchnl2_queue_group_id *qg_ids)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_delete_queue_groups *vc_del_q_grps;
+ struct idpf_cmd_info args;
+ int size;
+ int err;
+
+ size = sizeof(*vc_del_q_grps) +
+ (num_q_grps - 1) * sizeof(struct virtchnl2_queue_group_id);
+ vc_del_q_grps = rte_zmalloc("vc_del_q_grps", size, 0);
+
+ vc_del_q_grps->vport_id = vport->vport_id;
+ vc_del_q_grps->num_queue_groups = num_q_grps;
+ memcpy(vc_del_q_grps->qg_ids, qg_ids,
+ num_q_grps * sizeof(struct virtchnl2_queue_group_id));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_DEL_QUEUE_GROUPS;
+ args.in_args = (uint8_t *)vc_del_q_grps;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_DEL_QUEUE_GROUPS");
+
+ rte_free(vc_del_q_grps);
+ return err;
+}
+
int
idpf_vc_rss_key_set(struct idpf_vport *vport)
{
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index c45295290e..58b16e1c5d 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -64,4 +64,13 @@ int idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
__rte_internal
int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 *buff_count, struct idpf_dma_mem **buffs);
+__rte_internal
+int idpf_vc_queue_grps_del(struct idpf_vport *vport,
+ uint16_t num_q_grps,
+ struct virtchnl2_queue_group_id *qg_ids);
+__rte_internal
+int
+idpf_vc_queue_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *ptp_queue_grps_info,
+ uint8_t *ptp_queue_grps_out);
#endif /* _IDPF_COMMON_VIRTCHNL_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 70334a1b03..01d18f3f3f 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -43,6 +43,8 @@ INTERNAL {
idpf_vc_irq_map_unmap_config;
idpf_vc_one_msg_read;
idpf_vc_ptype_info_query;
+ idpf_vc_queue_grps_add;
+ idpf_vc_queue_grps_del;
idpf_vc_queue_switch;
idpf_vc_queues_ena_dis;
idpf_vc_rss_hash_get;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v9 03/14] net/cpfl: add hairpin queue group during vport init
2023-06-05 9:06 ` [PATCH v9 00/14] net/cpfl: add hairpin queue support beilei.xing
2023-06-05 9:06 ` [PATCH v9 01/14] net/cpfl: refine structures beilei.xing
2023-06-05 9:06 ` [PATCH v9 02/14] common/idpf: support queue groups add/delete beilei.xing
@ 2023-06-05 9:06 ` beilei.xing
2023-06-05 9:06 ` [PATCH v9 04/14] net/cpfl: support hairpin queue capability get beilei.xing
` (11 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 9:06 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds a hairpin queue group during vport init.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 134 +++++++++++++++++++++++++++++++++
drivers/net/cpfl/cpfl_ethdev.h | 18 +++++
drivers/net/cpfl/cpfl_rxtx.h | 7 ++
3 files changed, 159 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index e587155db6..7f34cd288c 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -840,6 +840,20 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
return 0;
}
+static int
+cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
+{
+ struct virtchnl2_queue_group_id qg_ids[CPFL_P2P_NB_QUEUE_GRPS] = {0};
+ int ret = 0;
+
+ qg_ids[0].queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
+ qg_ids[0].queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
+ ret = idpf_vc_queue_grps_del(vport, CPFL_P2P_NB_QUEUE_GRPS, qg_ids);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to delete p2p queue groups");
+ return ret;
+}
+
static int
cpfl_dev_close(struct rte_eth_dev *dev)
{
@@ -848,7 +862,12 @@ cpfl_dev_close(struct rte_eth_dev *dev)
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
+
+ if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
+ cpfl_p2p_queue_grps_del(vport);
+
idpf_vport_deinit(vport);
+ rte_free(cpfl_vport->p2p_q_chunks_info);
adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
adapter->cur_vport_nb--;
@@ -1284,6 +1303,96 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter)
return vport_idx;
}
+static int
+cpfl_p2p_q_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
+ uint8_t *p2p_q_vc_out_info)
+{
+ int ret;
+
+ p2p_queue_grps_info->vport_id = vport->vport_id;
+ p2p_queue_grps_info->qg_info.num_queue_groups = CPFL_P2P_NB_QUEUE_GRPS;
+ p2p_queue_grps_info->qg_info.groups[0].num_rx_q = CPFL_MAX_P2P_NB_QUEUES;
+ p2p_queue_grps_info->qg_info.groups[0].num_rx_bufq = CPFL_P2P_NB_RX_BUFQ;
+ p2p_queue_grps_info->qg_info.groups[0].num_tx_q = CPFL_MAX_P2P_NB_QUEUES;
+ p2p_queue_grps_info->qg_info.groups[0].num_tx_complq = CPFL_P2P_NB_TX_COMPLQ;
+ p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
+ p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
+ p2p_queue_grps_info->qg_info.groups[0].rx_q_grp_info.rss_lut_size = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.tx_tc = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.priority = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.is_sp = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.pir_weight = 0;
+
+ ret = idpf_vc_queue_grps_add(vport, p2p_queue_grps_info, p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to add p2p queue groups.");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
+ struct virtchnl2_add_queue_groups *p2p_q_vc_out_info)
+{
+ struct p2p_queue_chunks_info *p2p_q_chunks_info = cpfl_vport->p2p_q_chunks_info;
+ struct virtchnl2_queue_reg_chunks *vc_chunks_out;
+ int i, type;
+
+ if (p2p_q_vc_out_info->qg_info.groups[0].qg_id.queue_group_type !=
+ VIRTCHNL2_QUEUE_GROUP_P2P) {
+ PMD_DRV_LOG(ERR, "Add queue group response mismatch.");
+ return -EINVAL;
+ }
+
+ vc_chunks_out = &p2p_q_vc_out_info->qg_info.groups[0].chunks;
+
+ for (i = 0; i < vc_chunks_out->num_chunks; i++) {
+ type = vc_chunks_out->chunks[i].type;
+ switch (type) {
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ p2p_q_chunks_info->tx_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->tx_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->tx_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ p2p_q_chunks_info->rx_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->rx_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->rx_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ p2p_q_chunks_info->tx_compl_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->tx_compl_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->tx_compl_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ p2p_q_chunks_info->rx_buf_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->rx_buf_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->rx_buf_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported queue type");
+ break;
+ }
+ }
+
+ return 0;
+}
+
static int
cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
{
@@ -1293,6 +1402,8 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
struct cpfl_adapter_ext *adapter = param->adapter;
/* for sending create vport virtchnl msg prepare */
struct virtchnl2_create_vport create_vport_info;
+ struct virtchnl2_add_queue_groups p2p_queue_grps_info;
+ uint8_t p2p_q_vc_out_info[IDPF_DFLT_MBX_BUF_SIZE] = {0};
int ret = 0;
dev->dev_ops = &cpfl_eth_dev_ops;
@@ -1327,6 +1438,29 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
&dev->data->mac_addrs[0]);
+ if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
+ memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
+ ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_INIT_LOG(WARNING, "Failed to add p2p queue group.");
+ return 0;
+ }
+ cpfl_vport->p2p_q_chunks_info = rte_zmalloc(NULL,
+ sizeof(struct p2p_queue_chunks_info), 0);
+ if (cpfl_vport->p2p_q_chunks_info == NULL) {
+ PMD_INIT_LOG(WARNING, "Failed to allocate p2p queue info.");
+ cpfl_p2p_queue_grps_del(vport);
+ return 0;
+ }
+ ret = cpfl_p2p_queue_info_init(cpfl_vport,
+ (struct virtchnl2_add_queue_groups *)p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_INIT_LOG(WARNING, "Failed to init p2p queue info.");
+ rte_free(cpfl_vport->p2p_q_chunks_info);
+ cpfl_p2p_queue_grps_del(vport);
+ }
+ }
+
return 0;
err_mac_addrs:
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 81fe9ac4c3..666d46a44a 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -56,6 +56,7 @@
/* Device IDs */
#define IDPF_DEV_ID_CPF 0x1453
+#define VIRTCHNL2_QUEUE_GROUP_P2P 0x100
struct cpfl_vport_param {
struct cpfl_adapter_ext *adapter;
@@ -69,8 +70,25 @@ struct cpfl_devargs {
uint16_t req_vport_nb;
};
+struct p2p_queue_chunks_info {
+ uint32_t tx_start_qid;
+ uint32_t rx_start_qid;
+ uint32_t tx_compl_start_qid;
+ uint32_t rx_buf_start_qid;
+
+ uint64_t tx_qtail_start;
+ uint32_t tx_qtail_spacing;
+ uint64_t rx_qtail_start;
+ uint32_t rx_qtail_spacing;
+ uint64_t tx_compl_qtail_start;
+ uint32_t tx_compl_qtail_spacing;
+ uint64_t rx_buf_qtail_start;
+ uint32_t rx_buf_qtail_spacing;
+};
+
struct cpfl_vport {
struct idpf_vport base;
+ struct p2p_queue_chunks_info *p2p_q_chunks_info;
};
struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index bfb9ad97bd..1fe65778f0 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -13,6 +13,13 @@
#define CPFL_MIN_RING_DESC 32
#define CPFL_MAX_RING_DESC 4096
#define CPFL_DMA_MEM_ALIGN 4096
+
+#define CPFL_MAX_P2P_NB_QUEUES 16
+#define CPFL_P2P_NB_RX_BUFQ 1
+#define CPFL_P2P_NB_TX_COMPLQ 1
+#define CPFL_P2P_NB_QUEUE_GRPS 1
+#define CPFL_P2P_QUEUE_GRP_ID 1
+
/* Base address of the HW descriptor ring should be 128B aligned. */
#define CPFL_RING_BASE_ALIGN 128
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v9 04/14] net/cpfl: support hairpin queue capability get
2023-06-05 9:06 ` [PATCH v9 00/14] net/cpfl: add hairpin queue support beilei.xing
` (2 preceding siblings ...)
2023-06-05 9:06 ` [PATCH v9 03/14] net/cpfl: add hairpin queue group during vport init beilei.xing
@ 2023-06-05 9:06 ` beilei.xing
2023-06-05 9:06 ` [PATCH v9 05/14] net/cpfl: support hairpin queue setup and release beilei.xing
` (10 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 9:06 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch adds hairpin_cap_get ops support.
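For context, a minimal sketch of how an application reaches this new ops through the
generic ethdev API; the port id is arbitrary and error handling is reduced:

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch only: query hairpin capabilities of a port. */
static int
example_hairpin_cap(uint16_t port_id)
{
	struct rte_eth_hairpin_cap cap;
	int ret;

	ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
	if (ret != 0)
		return ret;	/* -ENOTSUP when no P2P queue group exists */

	printf("max hairpin queues %u, max desc %u\n",
	       cap.max_nb_queues, cap.max_nb_desc);
	return 0;
}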
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 18 ++++++++++++++++++
drivers/net/cpfl/cpfl_rxtx.h | 3 +++
2 files changed, 21 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 7f34cd288c..4a7e1124b1 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -154,6 +154,23 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
return rte_eth_linkstatus_set(dev, &new_link);
}
+static int
+cpfl_hairpin_cap_get(struct rte_eth_dev *dev,
+ struct rte_eth_hairpin_cap *cap)
+{
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+
+ if (cpfl_vport->p2p_q_chunks_info == NULL)
+ return -ENOTSUP;
+
+ cap->max_nb_queues = CPFL_MAX_P2P_NB_QUEUES;
+ cap->max_rx_2_tx = CPFL_MAX_HAIRPINQ_RX_2_TX;
+ cap->max_tx_2_rx = CPFL_MAX_HAIRPINQ_TX_2_RX;
+ cap->max_nb_desc = CPFL_MAX_HAIRPINQ_NB_DESC;
+
+ return 0;
+}
+
static int
cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
@@ -904,6 +921,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.xstats_get = cpfl_dev_xstats_get,
.xstats_get_names = cpfl_dev_xstats_get_names,
.xstats_reset = cpfl_dev_xstats_reset,
+ .hairpin_cap_get = cpfl_hairpin_cap_get,
};
static int
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 1fe65778f0..a4a164d462 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -14,6 +14,9 @@
#define CPFL_MAX_RING_DESC 4096
#define CPFL_DMA_MEM_ALIGN 4096
+#define CPFL_MAX_HAIRPINQ_RX_2_TX 1
+#define CPFL_MAX_HAIRPINQ_TX_2_RX 1
+#define CPFL_MAX_HAIRPINQ_NB_DESC 1024
#define CPFL_MAX_P2P_NB_QUEUES 16
#define CPFL_P2P_NB_RX_BUFQ 1
#define CPFL_P2P_NB_TX_COMPLQ 1
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v9 05/14] net/cpfl: support hairpin queue setup and release
2023-06-05 9:06 ` [PATCH v9 00/14] net/cpfl: add hairpin queue support beilei.xing
` (3 preceding siblings ...)
2023-06-05 9:06 ` [PATCH v9 04/14] net/cpfl: support hairpin queue capability get beilei.xing
@ 2023-06-05 9:06 ` beilei.xing
2023-06-05 9:06 ` [PATCH v9 06/14] common/idpf: add queue config API beilei.xing
` (9 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 9:06 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
Support hairpin Rx/Tx queue setup and release.
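For context, a hedged sketch of the application-side calls that end up in these new
ops, using the generic ethdev hairpin API; single peer, automatic binding, and all
port/queue ids and the descriptor count are placeholders:

#include <rte_ethdev.h>

/* Sketch only: pair Rx hairpin queue rxq on rx_port with Tx hairpin
 * queue txq on tx_port.
 */
static int
example_hairpin_pair(uint16_t rx_port, uint16_t rxq,
		     uint16_t tx_port, uint16_t txq, uint16_t nb_desc)
{
	struct rte_eth_hairpin_conf conf = {0};
	int ret;

	conf.peer_count = 1;
	conf.manual_bind = 0;	/* let the PMD bind at dev_start */

	conf.peers[0].port = tx_port;
	conf.peers[0].queue = txq;
	ret = rte_eth_rx_hairpin_queue_setup(rx_port, rxq, nb_desc, &conf);
	if (ret != 0)
		return ret;

	conf.peers[0].port = rx_port;
	conf.peers[0].queue = rxq;
	return rte_eth_tx_hairpin_queue_setup(tx_port, txq, nb_desc, &conf);
}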
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 6 +
drivers/net/cpfl/cpfl_ethdev.h | 11 +
drivers/net/cpfl/cpfl_rxtx.c | 364 +++++++++++++++++++++++-
drivers/net/cpfl/cpfl_rxtx.h | 36 +++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 4 +
5 files changed, 420 insertions(+), 1 deletion(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 4a7e1124b1..d64b506038 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -879,6 +879,10 @@ cpfl_dev_close(struct rte_eth_dev *dev)
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
+ if (cpfl_vport->p2p_mp) {
+ rte_mempool_free(cpfl_vport->p2p_mp);
+ cpfl_vport->p2p_mp = NULL;
+ }
if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
cpfl_p2p_queue_grps_del(vport);
@@ -922,6 +926,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.xstats_get_names = cpfl_dev_xstats_get_names,
.xstats_reset = cpfl_dev_xstats_reset,
.hairpin_cap_get = cpfl_hairpin_cap_get,
+ .rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
+ .tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
};
static int
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 666d46a44a..2e42354f70 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -89,6 +89,17 @@ struct p2p_queue_chunks_info {
struct cpfl_vport {
struct idpf_vport base;
struct p2p_queue_chunks_info *p2p_q_chunks_info;
+
+ struct rte_mempool *p2p_mp;
+
+ uint16_t nb_data_rxq;
+ uint16_t nb_data_txq;
+ uint16_t nb_p2p_rxq;
+ uint16_t nb_p2p_txq;
+
+ struct idpf_rx_queue *p2p_rx_bufq;
+ struct idpf_tx_queue *p2p_tx_complq;
+ bool p2p_manual_bind;
};
struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 04a51b8d15..90b408d1f4 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -10,6 +10,67 @@
#include "cpfl_rxtx.h"
#include "cpfl_rxtx_vec_common.h"
+static inline void
+cpfl_tx_hairpin_descq_reset(struct idpf_tx_queue *txq)
+{
+ uint32_t i, size;
+
+ if (!txq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+ return;
+ }
+
+ size = txq->nb_tx_desc * CPFL_P2P_DESC_LEN;
+ for (i = 0; i < size; i++)
+ ((volatile char *)txq->desc_ring)[i] = 0;
+}
+
+static inline void
+cpfl_tx_hairpin_complq_reset(struct idpf_tx_queue *cq)
+{
+ uint32_t i, size;
+
+ if (!cq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to complq is NULL");
+ return;
+ }
+
+ size = cq->nb_tx_desc * CPFL_P2P_DESC_LEN;
+ for (i = 0; i < size; i++)
+ ((volatile char *)cq->compl_ring)[i] = 0;
+}
+
+static inline void
+cpfl_rx_hairpin_descq_reset(struct idpf_rx_queue *rxq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxq)
+ return;
+
+ len = rxq->nb_rx_desc;
+ for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
+ ((volatile char *)rxq->rx_ring)[i] = 0;
+}
+
+static inline void
+cpfl_rx_hairpin_bufq_reset(struct idpf_rx_queue *rxbq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxbq)
+ return;
+
+ len = rxbq->nb_rx_desc;
+ for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
+ ((volatile char *)rxbq->rx_ring)[i] = 0;
+
+ rxbq->bufq1 = NULL;
+ rxbq->bufq2 = NULL;
+}
+
static uint64_t
cpfl_rx_offload_convert(uint64_t offload)
{
@@ -234,7 +295,10 @@ cpfl_rx_queue_release(void *rxq)
/* Split queue */
if (!q->adapter->is_rx_singleq) {
- if (q->bufq2)
+ /* The mz is shared between Tx/Rx hairpin queues; let the Rx release
+ * path free the buffers, q->bufq1->mz and q->mz.
+ */
+ if (!cpfl_rxq->hairpin_info.hairpin_q && q->bufq2)
cpfl_rx_split_bufq_release(q->bufq2);
if (q->bufq1)
@@ -385,6 +449,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
}
+ cpfl_vport->nb_data_rxq++;
rxq->q_set = true;
dev->data->rx_queues[queue_idx] = cpfl_rxq;
@@ -548,6 +613,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
queue_idx * vport->chunks_info.tx_qtail_spacing);
txq->ops = &def_txq_ops;
+ cpfl_vport->nb_data_txq++;
txq->q_set = true;
dev->data->tx_queues[queue_idx] = cpfl_txq;
@@ -562,6 +628,300 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return ret;
}
+static int
+cpfl_rx_hairpin_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,
+ uint16_t logic_qid, uint16_t nb_desc)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct rte_mempool *mp;
+ char pool_name[RTE_MEMPOOL_NAMESIZE];
+
+ mp = cpfl_vport->p2p_mp;
+ if (!mp) {
+ snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "p2p_mb_pool_%u",
+ dev->data->port_id);
+ mp = rte_pktmbuf_pool_create(pool_name, CPFL_P2P_NB_MBUF * CPFL_MAX_P2P_NB_QUEUES,
+ CPFL_P2P_CACHE_SIZE, 0, CPFL_P2P_MBUF_SIZE,
+ dev->device->numa_node);
+ if (!mp) {
+ PMD_INIT_LOG(ERR, "Failed to allocate mbuf pool for p2p");
+ return -ENOMEM;
+ }
+ cpfl_vport->p2p_mp = mp;
+ }
+
+ bufq->mp = mp;
+ bufq->nb_rx_desc = nb_desc;
+ bufq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_buf_start_qid,
+ logic_qid);
+ bufq->port_id = dev->data->port_id;
+ bufq->adapter = adapter;
+ bufq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
+
+ bufq->q_set = true;
+ bufq->ops = &def_rxq_ops;
+
+ return 0;
+}
+
+int
+cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf)
+{
+ struct cpfl_vport *cpfl_vport = (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter_base = vport->adapter;
+ uint16_t logic_qid = cpfl_vport->nb_p2p_rxq;
+ struct cpfl_rxq_hairpin_info *hairpin_info;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct idpf_rx_queue *bufq1 = NULL;
+ struct idpf_rx_queue *rxq;
+ uint16_t peer_port, peer_q;
+ uint16_t qid;
+ int ret;
+
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin queue.");
+ return -EINVAL;
+ }
+
+ if (conf->peer_count != 1) {
+ PMD_INIT_LOG(ERR, "Can't support Rx hairpin queue peer count %d", conf->peer_count);
+ return -EINVAL;
+ }
+
+ peer_port = conf->peers[0].port;
+ peer_q = conf->peers[0].queue;
+
+ if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
+ nb_desc > CPFL_MAX_RING_DESC ||
+ nb_desc < CPFL_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is invalid", nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory if needed */
+ if (dev->data->rx_queues[queue_idx]) {
+ cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* Setup Rx description queue */
+ cpfl_rxq = rte_zmalloc_socket("cpfl hairpin rxq",
+ sizeof(struct cpfl_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!cpfl_rxq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
+ return -ENOMEM;
+ }
+
+ rxq = &cpfl_rxq->base;
+ hairpin_info = &cpfl_rxq->hairpin_info;
+ rxq->nb_rx_desc = nb_desc * 2;
+ rxq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid, logic_qid);
+ rxq->port_id = dev->data->port_id;
+ rxq->adapter = adapter_base;
+ rxq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
+ hairpin_info->hairpin_q = true;
+ hairpin_info->peer_txp = peer_port;
+ hairpin_info->peer_txq_id = peer_q;
+
+ if (conf->manual_bind != 0)
+ cpfl_vport->p2p_manual_bind = true;
+ else
+ cpfl_vport->p2p_manual_bind = false;
+
+ if (cpfl_vport->p2p_rx_bufq == NULL) {
+ bufq1 = rte_zmalloc_socket("hairpin rx bufq1",
+ sizeof(struct idpf_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!bufq1) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for hairpin Rx buffer queue 1.");
+ ret = -ENOMEM;
+ goto err_alloc_bufq1;
+ }
+ qid = 2 * logic_qid;
+ ret = cpfl_rx_hairpin_bufq_setup(dev, bufq1, qid, nb_desc);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to setup hairpin Rx buffer queue 1");
+ ret = -EINVAL;
+ goto err_setup_bufq1;
+ }
+ cpfl_vport->p2p_rx_bufq = bufq1;
+ }
+
+ rxq->bufq1 = cpfl_vport->p2p_rx_bufq;
+ rxq->bufq2 = NULL;
+
+ cpfl_vport->nb_p2p_rxq++;
+ rxq->q_set = true;
+ dev->data->rx_queues[queue_idx] = cpfl_rxq;
+
+ return 0;
+
+err_setup_bufq1:
+ rte_mempool_free(cpfl_vport->p2p_mp);
+ rte_free(bufq1);
+err_alloc_bufq1:
+ rte_free(cpfl_rxq);
+
+ return ret;
+}
+
+int
+cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter_base = vport->adapter;
+ uint16_t logic_qid = cpfl_vport->nb_p2p_txq;
+ struct cpfl_txq_hairpin_info *hairpin_info;
+ struct idpf_hw *hw = &adapter_base->hw;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct idpf_tx_queue *txq, *cq;
+ const struct rte_memzone *mz;
+ uint32_t ring_size;
+ uint16_t peer_port, peer_q;
+ int ret;
+
+ if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin queue.");
+ return -EINVAL;
+ }
+
+ if (conf->peer_count != 1) {
+ PMD_INIT_LOG(ERR, "Can't support Tx hairpin queue peer count %d", conf->peer_count);
+ return -EINVAL;
+ }
+
+ peer_port = conf->peers[0].port;
+ peer_q = conf->peers[0].queue;
+
+ if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
+ nb_desc > CPFL_MAX_RING_DESC ||
+ nb_desc < CPFL_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is invalid",
+ nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory if needed. */
+ if (dev->data->tx_queues[queue_idx]) {
+ cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the TX queue data structure. */
+ cpfl_txq = rte_zmalloc_socket("cpfl hairpin txq",
+ sizeof(struct cpfl_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!cpfl_txq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+ return -ENOMEM;
+ }
+
+ txq = &cpfl_txq->base;
+ hairpin_info = &cpfl_txq->hairpin_info;
+ /* Txq ring length should be 2 times the Tx completion queue size. */
+ txq->nb_tx_desc = nb_desc * 2;
+ txq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_start_qid, logic_qid);
+ txq->port_id = dev->data->port_id;
+ hairpin_info->hairpin_q = true;
+ hairpin_info->peer_rxp = peer_port;
+ hairpin_info->peer_rxq_id = peer_q;
+
+ if (conf->manual_bind != 0)
+ cpfl_vport->p2p_manual_bind = true;
+ else
+ cpfl_vport->p2p_manual_bind = false;
+
+ /* A Tx hairpin queue always allocates the Tx HW ring */
+ ring_size = RTE_ALIGN(txq->nb_tx_desc * CPFL_P2P_DESC_LEN,
+ CPFL_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_ring", logic_qid,
+ ring_size + CPFL_P2P_RING_BUF,
+ CPFL_RING_BASE_ALIGN,
+ dev->device->numa_node);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
+ ret = -ENOMEM;
+ goto err_txq_mz_rsv;
+ }
+
+ txq->tx_ring_phys_addr = mz->iova;
+ txq->desc_ring = mz->addr;
+ txq->mz = mz;
+
+ cpfl_tx_hairpin_descq_reset(txq);
+ txq->qtx_tail = hw->hw_addr +
+ cpfl_hw_qtail_get(cpfl_vport->p2p_q_chunks_info->tx_qtail_start,
+ logic_qid, cpfl_vport->p2p_q_chunks_info->tx_qtail_spacing);
+ txq->ops = &def_txq_ops;
+
+ if (cpfl_vport->p2p_tx_complq == NULL) {
+ cq = rte_zmalloc_socket("cpfl hairpin cq",
+ sizeof(struct idpf_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ dev->device->numa_node);
+ if (!cq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+ ret = -ENOMEM;
+ goto err_cq_alloc;
+ }
+
+ cq->nb_tx_desc = nb_desc;
+ cq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_compl_start_qid,
+ 0);
+ cq->port_id = dev->data->port_id;
+
+ /* Tx completion queue always allocates the HW ring */
+ ring_size = RTE_ALIGN(cq->nb_tx_desc * CPFL_P2P_DESC_LEN,
+ CPFL_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_compl_ring", logic_qid,
+ ring_size + CPFL_P2P_RING_BUF,
+ CPFL_RING_BASE_ALIGN,
+ dev->device->numa_node);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX completion queue");
+ ret = -ENOMEM;
+ goto err_cq_mz_rsv;
+ }
+ cq->tx_ring_phys_addr = mz->iova;
+ cq->compl_ring = mz->addr;
+ cq->mz = mz;
+
+ cpfl_tx_hairpin_complq_reset(cq);
+ cpfl_vport->p2p_tx_complq = cq;
+ }
+
+ txq->complq = cpfl_vport->p2p_tx_complq;
+
+ cpfl_vport->nb_p2p_txq++;
+ txq->q_set = true;
+ dev->data->tx_queues[queue_idx] = cpfl_txq;
+
+ return 0;
+
+err_cq_mz_rsv:
+ rte_free(cq);
+err_cq_alloc:
+ cpfl_dma_zone_release(mz);
+err_txq_mz_rsv:
+ rte_free(cpfl_txq);
+ return ret;
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
@@ -865,6 +1225,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ continue;
(void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index a4a164d462..06198d4aad 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -22,6 +22,11 @@
#define CPFL_P2P_NB_TX_COMPLQ 1
#define CPFL_P2P_NB_QUEUE_GRPS 1
#define CPFL_P2P_QUEUE_GRP_ID 1
+#define CPFL_P2P_DESC_LEN 16
+#define CPFL_P2P_NB_MBUF 4096
+#define CPFL_P2P_CACHE_SIZE 250
+#define CPFL_P2P_MBUF_SIZE 2048
+#define CPFL_P2P_RING_BUF 128
/* Base address of the HW descriptor ring should be 128B aligned. */
#define CPFL_RING_BASE_ALIGN 128
@@ -33,14 +38,40 @@
#define CPFL_SUPPORT_CHAIN_NUM 5
+struct cpfl_rxq_hairpin_info {
+ bool hairpin_q; /* if rx queue is a hairpin queue */
+ uint16_t peer_txp;
+ uint16_t peer_txq_id;
+};
+
struct cpfl_rx_queue {
struct idpf_rx_queue base;
+ struct cpfl_rxq_hairpin_info hairpin_info;
+};
+
+struct cpfl_txq_hairpin_info {
+ bool hairpin_q; /* if tx queue is a hairpin queue */
+ uint16_t peer_rxp;
+ uint16_t peer_rxq_id;
};
struct cpfl_tx_queue {
struct idpf_tx_queue base;
+ struct cpfl_txq_hairpin_info hairpin_info;
};
+static inline uint16_t
+cpfl_hw_qid_get(uint16_t start_qid, uint16_t offset)
+{
+ return start_qid + offset;
+}
+
+static inline uint64_t
+cpfl_hw_qtail_get(uint64_t tail_start, uint16_t offset, uint64_t tail_spacing)
+{
+ return tail_start + offset * tail_spacing;
+}
+
int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
@@ -59,4 +90,9 @@ void cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void cpfl_set_rx_function(struct rte_eth_dev *dev);
void cpfl_set_tx_function(struct rte_eth_dev *dev);
+int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, const struct rte_eth_hairpin_conf *conf);
+int cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf);
#endif /* _CPFL_RXTX_H_ */
diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
index 5690b17911..d8e9191196 100644
--- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
+++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
@@ -85,6 +85,8 @@ cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)
cpfl_rxq = dev->data->rx_queues[i];
default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ continue;
splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base);
ret = splitq_ret && default_ret;
} else {
@@ -106,6 +108,8 @@ cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_tx_queues; i++) {
cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq->hairpin_info.hairpin_q)
+ continue;
ret = cpfl_tx_vec_queue_default(&cpfl_txq->base);
if (ret == CPFL_SCALAR_PATH)
return CPFL_SCALAR_PATH;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v9 06/14] common/idpf: add queue config API
2023-06-05 9:06 ` [PATCH v9 00/14] net/cpfl: add hairpin queue support beilei.xing
` (4 preceding siblings ...)
2023-06-05 9:06 ` [PATCH v9 05/14] net/cpfl: support hairpin queue setup and release beilei.xing
@ 2023-06-05 9:06 ` beilei.xing
2023-06-05 9:06 ` [PATCH v9 07/14] net/cpfl: support hairpin queue configuration beilei.xing
` (8 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 9:06 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch supports Rx/Tx queue configuration APIs.
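As an illustration (not part of this patch), a driver can now build a
virtchnl2_rxq_info itself and configure a single queue; the field subset below mirrors
what the cpfl hairpin path fills in a later patch of this series:

/* Sketch only: configure one Rx queue from caller-built queue info. */
static int
example_config_one_rxq(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
{
	struct virtchnl2_rxq_info rxq_info;

	memset(&rxq_info, 0, sizeof(rxq_info));
	rxq_info.type = VIRTCHNL2_QUEUE_TYPE_RX;
	rxq_info.queue_id = rxq->queue_id;
	rxq_info.ring_len = rxq->nb_rx_desc;
	rxq_info.dma_ring_addr = rxq->rx_ring_phys_addr;
	rxq_info.model = VIRTCHNL2_QUEUE_MODEL_SPLIT;

	/* Sends VIRTCHNL2_OP_CONFIG_RX_QUEUES with num_qs = 1. */
	return idpf_vc_rxq_config_by_info(vport, &rxq_info, 1);
}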
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 70 ++++++++++++++++++++++
drivers/common/idpf/idpf_common_virtchnl.h | 6 ++
drivers/common/idpf/version.map | 2 +
3 files changed, 78 insertions(+)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index a3fe55c897..211b44a88e 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -1050,6 +1050,41 @@ idpf_vc_rxq_config(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
return err;
}
+int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct virtchnl2_rxq_info *rxq_info,
+ uint16_t num_qs)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+ struct idpf_cmd_info args;
+ int size, err, i;
+
+ size = sizeof(*vc_rxqs) + (num_qs - 1) *
+ sizeof(struct virtchnl2_rxq_info);
+ vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+ if (vc_rxqs == NULL) {
+ DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_rxqs->vport_id = vport->vport_id;
+ vc_rxqs->num_qinfo = num_qs;
+ memcpy(vc_rxqs->qinfo, rxq_info, num_qs * sizeof(struct virtchnl2_rxq_info));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+ args.in_args = (uint8_t *)vc_rxqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ rte_free(vc_rxqs);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+ return err;
+}
+
int
idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
{
@@ -1121,6 +1156,41 @@ idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
return err;
}
+int
+idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
+ uint16_t num_qs)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+ struct idpf_cmd_info args;
+ int size, err;
+
+ size = sizeof(*vc_txqs) + (num_qs - 1) * sizeof(struct virtchnl2_txq_info);
+ vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+ if (vc_txqs == NULL) {
+ DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_txqs->vport_id = vport->vport_id;
+ vc_txqs->num_qinfo = num_qs;
+ memcpy(vc_txqs->qinfo, txq_info, num_qs * sizeof(struct virtchnl2_txq_info));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+ args.in_args = (uint8_t *)vc_txqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ rte_free(vc_txqs);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+ return err;
+}
+
int
idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
struct idpf_ctlq_msg *q_msg)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index 58b16e1c5d..db83761a5e 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -65,6 +65,12 @@ __rte_internal
int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 *buff_count, struct idpf_dma_mem **buffs);
__rte_internal
+int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct virtchnl2_rxq_info *rxq_info,
+ uint16_t num_qs);
+__rte_internal
+int idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
+ uint16_t num_qs);
+__rte_internal
int idpf_vc_queue_grps_del(struct idpf_vport *vport,
uint16_t num_q_grps,
struct virtchnl2_queue_group_id *qg_ids);
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 01d18f3f3f..17e77884ce 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -54,8 +54,10 @@ INTERNAL {
idpf_vc_rss_lut_get;
idpf_vc_rss_lut_set;
idpf_vc_rxq_config;
+ idpf_vc_rxq_config_by_info;
idpf_vc_stats_query;
idpf_vc_txq_config;
+ idpf_vc_txq_config_by_info;
idpf_vc_vectors_alloc;
idpf_vc_vectors_dealloc;
idpf_vc_vport_create;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v9 07/14] net/cpfl: support hairpin queue configuration
2023-06-05 9:06 ` [PATCH v9 00/14] net/cpfl: add hairpin queue support beilei.xing
` (5 preceding siblings ...)
2023-06-05 9:06 ` [PATCH v9 06/14] common/idpf: add queue config API beilei.xing
@ 2023-06-05 9:06 ` beilei.xing
2023-06-05 9:06 ` [PATCH v9 08/14] common/idpf: add switch queue API beilei.xing
` (7 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 9:06 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports Rx/Tx hairpin queue configuration.
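One subtle step in this patch is translating the peer queue index the application
passed at hairpin setup time into the absolute P2P queue id the device expects. A
worked sketch with assumed numbers: if the Rx vport got rx_start_qid = 0x300 from the
queue group chunks and owns 2 normal data Rx queues, an application-level peer queue
index of 3 maps as below:

	/* Assumed values: rx_start_qid = 0x300, nb_data_rxq = 2. */
	uint16_t peer_rxq_id = 3;	/* queue index as seen by the application */
	uint16_t hw_qid = cpfl_hw_qid_get(0x300, peer_rxq_id - 2);	/* 0x300 + 1 = 0x301 */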
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 136 +++++++++++++++++++++++++++++++--
drivers/net/cpfl/cpfl_rxtx.c | 88 +++++++++++++++++++++
drivers/net/cpfl/cpfl_rxtx.h | 7 ++
3 files changed, 225 insertions(+), 6 deletions(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index d64b506038..0696c6bc68 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -742,33 +742,157 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
return idpf_vport_irq_map_config(vport, nb_rx_queues);
}
+/* Update hairpin_info for dev's tx hairpin queue */
+static int
+cpfl_txq_hairpin_info_update(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+ struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+ struct cpfl_txq_hairpin_info *hairpin_info;
+ struct cpfl_tx_queue *cpfl_txq;
+ int i;
+
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ hairpin_info = &cpfl_txq->hairpin_info;
+ if (hairpin_info->peer_rxp != rx_port) {
+ PMD_DRV_LOG(ERR, "port %d is not the peer port", rx_port);
+ return -EINVAL;
+ }
+ hairpin_info->peer_rxq_id =
+ cpfl_hw_qid_get(cpfl_rx_vport->p2p_q_chunks_info->rx_start_qid,
+ hairpin_info->peer_rxq_id - cpfl_rx_vport->nb_data_rxq);
+ }
+
+ return 0;
+}
+
+/* Bind Rx hairpin queue's memory zone to peer Tx hairpin queue's memory zone */
+static void
+cpfl_rxq_hairpin_mz_bind(struct rte_eth_dev *dev)
+{
+ struct cpfl_vport *cpfl_rx_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_rx_vport->base;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_hw *hw = &adapter->hw;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct rte_eth_dev *peer_dev;
+ const struct rte_memzone *mz;
+ uint16_t peer_tx_port;
+ uint16_t peer_tx_qid;
+ int i;
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ peer_tx_port = cpfl_rxq->hairpin_info.peer_txp;
+ peer_tx_qid = cpfl_rxq->hairpin_info.peer_txq_id;
+ peer_dev = &rte_eth_devices[peer_tx_port];
+ cpfl_txq = peer_dev->data->tx_queues[peer_tx_qid];
+
+ /* bind rx queue */
+ mz = cpfl_txq->base.mz;
+ cpfl_rxq->base.rx_ring_phys_addr = mz->iova;
+ cpfl_rxq->base.rx_ring = mz->addr;
+ cpfl_rxq->base.mz = mz;
+
+ /* bind rx buffer queue */
+ mz = cpfl_txq->base.complq->mz;
+ cpfl_rxq->base.bufq1->rx_ring_phys_addr = mz->iova;
+ cpfl_rxq->base.bufq1->rx_ring = mz->addr;
+ cpfl_rxq->base.bufq1->mz = mz;
+ cpfl_rxq->base.bufq1->qrx_tail = hw->hw_addr +
+ cpfl_hw_qtail_get(cpfl_rx_vport->p2p_q_chunks_info->rx_buf_qtail_start,
+ 0, cpfl_rx_vport->p2p_q_chunks_info->rx_buf_qtail_spacing);
+ }
+}
+
static int
cpfl_start_queues(struct rte_eth_dev *dev)
{
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_rx_queue *cpfl_rxq;
struct cpfl_tx_queue *cpfl_txq;
+ int update_flag = 0;
int err = 0;
int i;
+ /* For normal data queues, configure, init and enable Txq.
+ * For non-manual bind hairpin queues, configure Txq.
+ */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
cpfl_txq = dev->data->tx_queues[i];
if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
continue;
- err = cpfl_tx_queue_start(dev, i);
+ if (!cpfl_txq->hairpin_info.hairpin_q) {
+ err = cpfl_tx_queue_start(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+ return err;
+ }
+ } else if (!cpfl_vport->p2p_manual_bind) {
+ if (update_flag == 0) {
+ err = cpfl_txq_hairpin_info_update(dev,
+ cpfl_txq->hairpin_info.peer_rxp);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info");
+ return err;
+ }
+ update_flag = 1;
+ }
+ err = cpfl_hairpin_txq_config(vport, cpfl_txq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+ return err;
+ }
+ }
+ }
+
+ /* For non-manual bind hairpin queues, configure Tx completion queue first. */
+ if (!cpfl_vport->p2p_manual_bind && cpfl_vport->p2p_tx_complq != NULL) {
+ err = cpfl_hairpin_tx_complq_config(cpfl_vport);
if (err != 0) {
- PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+ PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
return err;
}
}
+ /* For non-manual bind hairpin queues, configure Rx buffer queue.*/
+ if (!cpfl_vport->p2p_manual_bind && cpfl_vport->p2p_rx_bufq != NULL) {
+ cpfl_rxq_hairpin_mz_bind(dev);
+ err = cpfl_hairpin_rx_bufq_config(cpfl_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
+ return err;
+ }
+ }
+
+ /* For normal data queues, configure, init and enable Rxq.
+ * For non-manual bind hairpin queues, configure Rxq, and then init Rxq.
+ */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
continue;
- err = cpfl_rx_queue_start(dev, i);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
- return err;
+ if (!cpfl_rxq->hairpin_info.hairpin_q) {
+ err = cpfl_rx_queue_start(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
+ return err;
+ }
+ } else if (!cpfl_vport->p2p_manual_bind) {
+ err = cpfl_hairpin_rxq_config(vport, cpfl_rxq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
+ return err;
+ }
+ err = cpfl_rx_queue_init(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+ return err;
+ }
}
}
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 90b408d1f4..fd24d544a1 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -922,6 +922,94 @@ cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return ret;
}
+int
+cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport)
+{
+ struct idpf_rx_queue *rx_bufq = cpfl_vport->p2p_rx_bufq;
+ struct virtchnl2_rxq_info rxq_info;
+
+ memset(&rxq_info, 0, sizeof(rxq_info));
+
+ rxq_info.type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ rxq_info.queue_id = rx_bufq->queue_id;
+ rxq_info.ring_len = rx_bufq->nb_rx_desc;
+ rxq_info.dma_ring_addr = rx_bufq->rx_ring_phys_addr;
+ rxq_info.desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info.rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
+ rxq_info.model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ rxq_info.data_buffer_size = rx_bufq->rx_buf_len;
+ rxq_info.buffer_notif_stride = CPFL_RX_BUF_STRIDE;
+
+ return idpf_vc_rxq_config_by_info(&cpfl_vport->base, &rxq_info, 1);
+}
+
+int
+cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq)
+{
+ struct virtchnl2_rxq_info rxq_info;
+ struct idpf_rx_queue *rxq = &cpfl_rxq->base;
+
+ memset(&rxq_info, 0, sizeof(rxq_info));
+
+ rxq_info.type = VIRTCHNL2_QUEUE_TYPE_RX;
+ rxq_info.queue_id = rxq->queue_id;
+ rxq_info.ring_len = rxq->nb_rx_desc;
+ rxq_info.dma_ring_addr = rxq->rx_ring_phys_addr;
+ rxq_info.rx_bufq1_id = rxq->bufq1->queue_id;
+ rxq_info.max_pkt_size = vport->max_pkt_len;
+ rxq_info.desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info.qflags |= VIRTCHNL2_RX_DESC_SIZE_16BYTE;
+
+ rxq_info.data_buffer_size = rxq->rx_buf_len;
+ rxq_info.model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ rxq_info.rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
+
+ PMD_DRV_LOG(NOTICE, "hairpin: vport %u, Rxq id 0x%x",
+ vport->vport_id, rxq_info.queue_id);
+
+ return idpf_vc_rxq_config_by_info(vport, &rxq_info, 1);
+}
+
+int
+cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport)
+{
+ struct idpf_tx_queue *tx_complq = cpfl_vport->p2p_tx_complq;
+ struct virtchnl2_txq_info txq_info;
+
+ memset(&txq_info, 0, sizeof(txq_info));
+
+ txq_info.dma_ring_addr = tx_complq->tx_ring_phys_addr;
+ txq_info.type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ txq_info.queue_id = tx_complq->queue_id;
+ txq_info.ring_len = tx_complq->nb_tx_desc;
+ txq_info.peer_rx_queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+ txq_info.model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info.sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+
+ return idpf_vc_txq_config_by_info(&cpfl_vport->base, &txq_info, 1);
+}
+
+int
+cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq)
+{
+ struct idpf_tx_queue *txq = &cpfl_txq->base;
+ struct virtchnl2_txq_info txq_info;
+
+ memset(&txq_info, 0, sizeof(txq_info));
+
+ txq_info.dma_ring_addr = txq->tx_ring_phys_addr;
+ txq_info.type = VIRTCHNL2_QUEUE_TYPE_TX;
+ txq_info.queue_id = txq->queue_id;
+ txq_info.ring_len = txq->nb_tx_desc;
+ txq_info.tx_compl_queue_id = txq->complq->queue_id;
+ txq_info.relative_queue_id = txq->queue_id;
+ txq_info.peer_rx_queue_id = cpfl_txq->hairpin_info.peer_rxq_id;
+ txq_info.model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info.sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+
+ return idpf_vc_txq_config_by_info(vport, &txq_info, 1);
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 06198d4aad..872ebc1bfd 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -32,12 +32,15 @@
#define CPFL_RING_BASE_ALIGN 128
#define CPFL_DEFAULT_RX_FREE_THRESH 32
+#define CPFL_RXBUF_LOW_WATERMARK 64
#define CPFL_DEFAULT_TX_RS_THRESH 32
#define CPFL_DEFAULT_TX_FREE_THRESH 32
#define CPFL_SUPPORT_CHAIN_NUM 5
+#define CPFL_RX_BUF_STRIDE 64
+
struct cpfl_rxq_hairpin_info {
bool hairpin_q; /* if rx queue is a hairpin queue */
uint16_t peer_txp;
@@ -95,4 +98,8 @@ int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
int cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc,
const struct rte_eth_hairpin_conf *conf);
+int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport);
+int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq);
+int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
+int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
#endif /* _CPFL_RXTX_H_ */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v9 08/14] common/idpf: add switch queue API
2023-06-05 9:06 ` [PATCH v9 00/14] net/cpfl: add hairpin queue support beilei.xing
` (6 preceding siblings ...)
2023-06-05 9:06 ` [PATCH v9 07/14] net/cpfl: support hairpin queue configuration beilei.xing
@ 2023-06-05 9:06 ` beilei.xing
2023-06-05 9:06 ` [PATCH v9 09/14] net/cpfl: support hairpin queue start/stop beilei.xing
` (6 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 9:06 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch exposes the idpf_vc_ena_dis_one_queue API.
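For illustration (not part of this patch), the next patch in the series uses the newly
exported helper to toggle a single hairpin completion queue, roughly as below:

/* Sketch only: enable or disable one Tx completion queue by id. */
static int
example_switch_complq(struct idpf_vport *vport, uint16_t queue_id, bool on)
{
	return idpf_vc_ena_dis_one_queue(vport, queue_id,
					 VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION, on);
}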
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 2 +-
drivers/common/idpf/idpf_common_virtchnl.h | 3 +++
drivers/common/idpf/version.map | 1 +
3 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index 211b44a88e..6455f640da 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -733,7 +733,7 @@ idpf_vc_vectors_dealloc(struct idpf_vport *vport)
return err;
}
-static int
+int
idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
uint32_t type, bool on)
{
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index db83761a5e..9ff5c38c26 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -71,6 +71,9 @@ __rte_internal
int idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
uint16_t num_qs);
__rte_internal
+int idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
+ uint32_t type, bool on);
+__rte_internal
int idpf_vc_queue_grps_del(struct idpf_vport *vport,
uint16_t num_q_grps,
struct virtchnl2_queue_group_id *qg_ids);
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 17e77884ce..25624732b0 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -40,6 +40,7 @@ INTERNAL {
idpf_vc_cmd_execute;
idpf_vc_ctlq_post_rx_buffs;
idpf_vc_ctlq_recv;
+ idpf_vc_ena_dis_one_queue;
idpf_vc_irq_map_unmap_config;
idpf_vc_one_msg_read;
idpf_vc_ptype_info_query;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v9 09/14] net/cpfl: support hairpin queue start/stop
2023-06-05 9:06 ` [PATCH v9 00/14] net/cpfl: add hairpin queue support beilei.xing
` (7 preceding siblings ...)
2023-06-05 9:06 ` [PATCH v9 08/14] common/idpf: add switch queue API beilei.xing
@ 2023-06-05 9:06 ` beilei.xing
2023-06-05 9:06 ` [PATCH v9 10/14] common/idpf: add irq map config API beilei.xing
` (5 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 9:06 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports Rx/Tx hairpin queue start/stop.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 46 +++++++++
drivers/net/cpfl/cpfl_rxtx.c | 164 +++++++++++++++++++++++++++++----
drivers/net/cpfl/cpfl_rxtx.h | 15 +++
3 files changed, 207 insertions(+), 18 deletions(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 0696c6bc68..48e956f151 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -896,6 +896,52 @@ cpfl_start_queues(struct rte_eth_dev *dev)
}
}
+ /* For non-manual bind hairpin queues, enable Tx queue and Rx queue,
+ * then enable Tx completion queue and Rx buffer queue.
+ */
+ for (i = cpfl_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) {
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ i - cpfl_vport->nb_data_txq,
+ false, true);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on",
+ i);
+ else
+ cpfl_txq->base.q_started = true;
+ }
+ }
+
+ for (i = cpfl_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) {
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ i - cpfl_vport->nb_data_rxq,
+ true, true);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on",
+ i);
+ else
+ cpfl_rxq->base.q_started = true;
+ }
+ }
+
+ if (!cpfl_vport->p2p_manual_bind &&
+ cpfl_vport->p2p_tx_complq != NULL &&
+ cpfl_vport->p2p_rx_bufq != NULL) {
+ err = cpfl_switch_hairpin_complq(cpfl_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq");
+ return err;
+ }
+ err = cpfl_switch_hairpin_bufq(cpfl_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Rx bufq");
+ return err;
+ }
+ }
+
return err;
}
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index fd24d544a1..9d278dca54 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -1010,6 +1010,89 @@ cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq
return idpf_vc_txq_config_by_info(vport, &txq_info, 1);
}
+int
+cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ queue_id = cpfl_vport->p2p_tx_complq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+ return err;
+}
+
+int
+cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+ return err;
+}
+
+int
+cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t logic_qid,
+ bool rx, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = rx ? VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;
+
+ if (type == VIRTCHNL2_QUEUE_TYPE_RX)
+ queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid, logic_qid);
+ else
+ queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_start_qid, logic_qid);
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+ if (err)
+ return err;
+
+ return err;
+}
+
+static int
+cpfl_alloc_split_p2p_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+ volatile struct virtchnl2_p2p_rx_buf_desc *rxd;
+ struct rte_mbuf *mbuf = NULL;
+ uint64_t dma_addr;
+ uint16_t i;
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ mbuf = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!mbuf)) {
+ PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+ return -ENOMEM;
+ }
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+ rxd = &((volatile struct virtchnl2_p2p_rx_buf_desc *)(rxq->rx_ring))[i];
+ rxd->reserve0 = 0;
+ rxd->pkt_addr = dma_addr;
+ }
+
+ rxq->nb_rx_hold = 0;
+ /* The value written in the RX buffer queue tail register, must be a multiple of 8.*/
+ rxq->rx_tail = rxq->nb_rx_desc - CPFL_HAIRPIN_Q_TAIL_AUX_VALUE;
+
+ return 0;
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
@@ -1063,22 +1146,31 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
IDPF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
} else {
/* Split queue */
- err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
- return err;
- }
- err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
- return err;
+ if (cpfl_rxq->hairpin_info.hairpin_q) {
+ err = cpfl_alloc_split_p2p_rxq_mbufs(rxq->bufq1);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate p2p RX buffer queue mbuf");
+ return err;
+ }
+ } else {
+ err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+ return err;
+ }
+ err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+ return err;
+ }
}
rte_wmb();
/* Init the RX tail register. */
IDPF_PCI_REG_WRITE(rxq->bufq1->qrx_tail, rxq->bufq1->rx_tail);
- IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
+ if (rxq->bufq2)
+ IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
}
return err;
@@ -1185,7 +1277,12 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return -EINVAL;
cpfl_rxq = dev->data->rx_queues[rx_queue_id];
- err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ rx_queue_id - cpfl_vport->nb_data_txq,
+ true, false);
+ else
+ err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
rx_queue_id);
@@ -1199,10 +1296,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
idpf_qc_single_rx_queue_reset(rxq);
} else {
rxq->bufq1->ops->release_mbufs(rxq->bufq1);
- rxq->bufq2->ops->release_mbufs(rxq->bufq2);
- idpf_qc_split_rx_queue_reset(rxq);
+ if (rxq->bufq2)
+ rxq->bufq2->ops->release_mbufs(rxq->bufq2);
+ if (cpfl_rxq->hairpin_info.hairpin_q) {
+ cpfl_rx_hairpin_descq_reset(rxq);
+ cpfl_rx_hairpin_bufq_reset(rxq->bufq1);
+ } else {
+ idpf_qc_split_rx_queue_reset(rxq);
+ }
}
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ if (!cpfl_rxq->hairpin_info.hairpin_q)
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1221,7 +1325,12 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
cpfl_txq = dev->data->tx_queues[tx_queue_id];
- err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
+ if (cpfl_txq->hairpin_info.hairpin_q)
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ tx_queue_id - cpfl_vport->nb_data_txq,
+ false, false);
+ else
+ err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
tx_queue_id);
@@ -1234,10 +1343,17 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
idpf_qc_single_tx_queue_reset(txq);
} else {
- idpf_qc_split_tx_descq_reset(txq);
- idpf_qc_split_tx_complq_reset(txq->complq);
+ if (cpfl_txq->hairpin_info.hairpin_q) {
+ cpfl_tx_hairpin_descq_reset(txq);
+ cpfl_tx_hairpin_complq_reset(txq->complq);
+ } else {
+ idpf_qc_split_tx_descq_reset(txq);
+ idpf_qc_split_tx_complq_reset(txq->complq);
+ }
}
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ if (!cpfl_txq->hairpin_info.hairpin_q)
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1257,10 +1373,22 @@ cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
void
cpfl_stop_queues(struct rte_eth_dev *dev)
{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
struct cpfl_rx_queue *cpfl_rxq;
struct cpfl_tx_queue *cpfl_txq;
int i;
+ if (cpfl_vport->p2p_tx_complq != NULL) {
+ if (cpfl_switch_hairpin_complq(cpfl_vport, false) != 0)
+ PMD_DRV_LOG(ERR, "Failed to stop hairpin Tx complq");
+ }
+
+ if (cpfl_vport->p2p_rx_bufq != NULL) {
+ if (cpfl_switch_hairpin_bufq(cpfl_vport, false) != 0)
+ PMD_DRV_LOG(ERR, "Failed to stop hairpin Rx bufq");
+ }
+
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
if (cpfl_rxq == NULL)
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 872ebc1bfd..aacd087b56 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -41,6 +41,17 @@
#define CPFL_RX_BUF_STRIDE 64
+/* The value written in the RX buffer queue tail register,
+ * and in WritePTR field in the TX completion queue context,
+ * must be a multiple of 8.
+ */
+#define CPFL_HAIRPIN_Q_TAIL_AUX_VALUE 8
+
+struct virtchnl2_p2p_rx_buf_desc {
+ __le64 reserve0;
+ __le64 pkt_addr; /* Packet buffer address */
+};
+
struct cpfl_rxq_hairpin_info {
bool hairpin_q; /* if rx queue is a hairpin queue */
uint16_t peer_txp;
@@ -102,4 +113,8 @@ int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq);
int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
+int cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t qid,
+ bool rx, bool on);
#endif /* _CPFL_RXTX_H_ */
--
2.26.2
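An application-side sketch of how these paths are reached (assumed usage of the generic rte_ethdev API, not taken from this patch): when the hairpin queues were set up with manual_bind == 0, a plain start/stop is enough, since the enable and disable sequences added above run inside cpfl_start_queues()/cpfl_stop_queues().

#include <rte_ethdev.h>

static int
example_hairpin_start_stop(uint16_t port_id)
{
	int ret;

	/* Data queues come up first; then the non-manual-bind hairpin Tx/Rx
	 * queues, the Tx completion queue and the Rx buffer queue are
	 * enabled inside rte_eth_dev_start(). */
	ret = rte_eth_dev_start(port_id);
	if (ret != 0)
		return ret;

	/* ... traffic loops from the hairpin Tx queues back to the hairpin Rx queues ... */

	return rte_eth_dev_stop(port_id);	/* disables the hairpin queues again */
}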
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v9 10/14] common/idpf: add irq map config API
2023-06-05 9:06 ` [PATCH v9 00/14] net/cpfl: add hairpin queue support beilei.xing
` (8 preceding siblings ...)
2023-06-05 9:06 ` [PATCH v9 09/14] net/cpfl: support hairpin queue start/stop beilei.xing
@ 2023-06-05 9:06 ` beilei.xing
2023-06-05 9:06 ` [PATCH v9 11/14] net/cpfl: enable write back based on ITR expire beilei.xing
` (4 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 9:06 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds the idpf_vport_irq_map_config_by_qids API, which configures the interrupt mapping from a caller-provided list of queue IDs.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_device.c | 75 ++++++++++++++++++++++++
drivers/common/idpf/idpf_common_device.h | 4 ++
drivers/common/idpf/version.map | 1 +
3 files changed, 80 insertions(+)
diff --git a/drivers/common/idpf/idpf_common_device.c b/drivers/common/idpf/idpf_common_device.c
index dc47551b17..cc4207a46e 100644
--- a/drivers/common/idpf/idpf_common_device.c
+++ b/drivers/common/idpf/idpf_common_device.c
@@ -667,6 +667,81 @@ idpf_vport_irq_map_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
return ret;
}
+int
+idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport, uint32_t *qids, uint16_t nb_rx_queues)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_queue_vector *qv_map;
+ struct idpf_hw *hw = &adapter->hw;
+ uint32_t dynctl_val, itrn_val;
+ uint32_t dynctl_reg_start;
+ uint32_t itrn_reg_start;
+ uint16_t i;
+ int ret;
+
+ qv_map = rte_zmalloc("qv_map",
+ nb_rx_queues *
+ sizeof(struct virtchnl2_queue_vector), 0);
+ if (qv_map == NULL) {
+ DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
+ nb_rx_queues);
+ ret = -ENOMEM;
+ goto qv_map_alloc_err;
+ }
+
+ /* Rx interrupt disabled, Map interrupt only for writeback */
+
+ /* The capability flags adapter->caps.other_caps should be
+ * compared with bit VIRTCHNL2_CAP_WB_ON_ITR here. The if
+ * condition should be updated when the FW can return the
+ * correct flag bits.
+ */
+ dynctl_reg_start =
+ vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
+ itrn_reg_start =
+ vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
+ dynctl_val = IDPF_READ_REG(hw, dynctl_reg_start);
+ DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x", dynctl_val);
+ itrn_val = IDPF_READ_REG(hw, itrn_reg_start);
+ DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
+ /* Force write-backs by setting WB_ON_ITR bit in DYN_CTL
+ * register. WB_ON_ITR and INTENA are mutually exclusive
+ * bits. Setting WB_ON_ITR bits means TX and RX Descs
+ * are written back based on ITR expiration irrespective
+ * of INTENA setting.
+ */
+ /* TBD: need to tune INTERVAL value for better performance. */
+ itrn_val = (itrn_val == 0) ? IDPF_DFLT_INTERVAL : itrn_val;
+ dynctl_val = VIRTCHNL2_ITR_IDX_0 <<
+ PF_GLINT_DYN_CTL_ITR_INDX_S |
+ PF_GLINT_DYN_CTL_WB_ON_ITR_M |
+ itrn_val << PF_GLINT_DYN_CTL_INTERVAL_S;
+ IDPF_WRITE_REG(hw, dynctl_reg_start, dynctl_val);
+
+ for (i = 0; i < nb_rx_queues; i++) {
+ /* map all queues to the same vector */
+ qv_map[i].queue_id = qids[i];
+ qv_map[i].vector_id =
+ vport->recv_vectors->vchunks.vchunks->start_vector_id;
+ }
+ vport->qv_map = qv_map;
+
+ ret = idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, true);
+ if (ret != 0) {
+ DRV_LOG(ERR, "config interrupt mapping failed");
+ goto config_irq_map_err;
+ }
+
+ return 0;
+
+config_irq_map_err:
+ rte_free(vport->qv_map);
+ vport->qv_map = NULL;
+
+qv_map_alloc_err:
+ return ret;
+}
+
int
idpf_vport_irq_unmap_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
{
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 112367dae8..f767ea7cec 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -200,5 +200,9 @@ int idpf_vport_info_init(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_info);
__rte_internal
void idpf_vport_stats_update(struct virtchnl2_vport_stats *oes, struct virtchnl2_vport_stats *nes);
+__rte_internal
+int idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport,
+ uint32_t *qids,
+ uint16_t nb_rx_queues);
#endif /* _IDPF_COMMON_DEVICE_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 25624732b0..0729f6b912 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -69,6 +69,7 @@ INTERNAL {
idpf_vport_info_init;
idpf_vport_init;
idpf_vport_irq_map_config;
+ idpf_vport_irq_map_config_by_qids;
idpf_vport_irq_unmap_config;
idpf_vport_rss_config;
idpf_vport_stats_update;
--
2.26.2
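A minimal sketch of calling the new API (the hardware queue ids are placeholders; both queues end up on the vport's single allocated vector, as in the hairpin use later in this series):

static int
example_irq_map(struct idpf_vport *vport, uint32_t data_rxq_hw_id, uint32_t p2p_rxq_hw_id)
{
	/* Hardware queue ids are passed explicitly instead of being derived
	 * from vport->chunks_info inside the common code. */
	uint32_t qids[2] = { data_rxq_hw_id, p2p_rxq_hw_id };

	return idpf_vport_irq_map_config_by_qids(vport, qids, 2);
}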
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v9 11/14] net/cpfl: enable write back based on ITR expire
2023-06-05 9:06 ` [PATCH v9 00/14] net/cpfl: add hairpin queue support beilei.xing
` (9 preceding siblings ...)
2023-06-05 9:06 ` [PATCH v9 10/14] common/idpf: add irq map config API beilei.xing
@ 2023-06-05 9:06 ` beilei.xing
2023-06-05 9:06 ` [PATCH v9 12/14] net/cpfl: support peer ports get beilei.xing
` (3 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 9:06 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch enables write back based on ITR expire
(WB_ON_ITR) for hairpin queues.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 48e956f151..4502f04130 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -735,11 +735,22 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
static int
cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
{
+ uint32_t qids[CPFL_MAX_P2P_NB_QUEUES + IDPF_DEFAULT_RXQ_NUM] = {0};
struct cpfl_vport *cpfl_vport = dev->data->dev_private;
struct idpf_vport *vport = &cpfl_vport->base;
uint16_t nb_rx_queues = dev->data->nb_rx_queues;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i;
- return idpf_vport_irq_map_config(vport, nb_rx_queues);
+ for (i = 0; i < nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ qids[i] = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid,
+ (i - cpfl_vport->nb_data_rxq));
+ else
+ qids[i] = cpfl_hw_qid_get(vport->chunks_info.rx_start_qid, i);
+ }
+ return idpf_vport_irq_map_config_by_qids(vport, qids, nb_rx_queues);
}
/* Update hairpin_info for dev's tx hairpin queue */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v9 12/14] net/cpfl: support peer ports get
2023-06-05 9:06 ` [PATCH v9 00/14] net/cpfl: add hairpin queue support beilei.xing
` (10 preceding siblings ...)
2023-06-05 9:06 ` [PATCH v9 11/14] net/cpfl: enable write back based on ITR expire beilei.xing
@ 2023-06-05 9:06 ` beilei.xing
2023-06-05 9:06 ` [PATCH v9 13/14] net/cpfl: support hairpin bind/unbind beilei.xing
` (2 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 9:06 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports getting hairpin peer ports.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 41 ++++++++++++++++++++++++++++++++++
1 file changed, 41 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 4502f04130..49d1b8b58b 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1080,6 +1080,46 @@ cpfl_dev_close(struct rte_eth_dev *dev)
return 0;
}
+static int
+cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
+ size_t len, uint32_t tx)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_tx_queue *txq;
+ struct idpf_rx_queue *rxq;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i;
+ int j = 0;
+
+ if (len <= 0)
+ return -EINVAL;
+
+ if (cpfl_vport->p2p_q_chunks_info == NULL)
+ return -ENOTSUP;
+
+ if (tx > 0) {
+ for (i = cpfl_vport->nb_data_txq, j = 0; i < dev->data->nb_tx_queues; i++, j++) {
+ txq = dev->data->tx_queues[i];
+ if (txq == NULL)
+ return -EINVAL;
+ cpfl_txq = (struct cpfl_tx_queue *)txq;
+ peer_ports[j] = cpfl_txq->hairpin_info.peer_rxp;
+ }
+ } else if (tx == 0) {
+ for (i = cpfl_vport->nb_data_rxq, j = 0; i < dev->data->nb_rx_queues; i++, j++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq == NULL)
+ return -EINVAL;
+ cpfl_rxq = (struct cpfl_rx_queue *)rxq;
+ peer_ports[j] = cpfl_rxq->hairpin_info.peer_txp;
+ }
+ }
+
+ return j;
+}
+
static const struct eth_dev_ops cpfl_eth_dev_ops = {
.dev_configure = cpfl_dev_configure,
.dev_close = cpfl_dev_close,
@@ -1109,6 +1149,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.hairpin_cap_get = cpfl_hairpin_cap_get,
.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
+ .hairpin_get_peer_ports = cpfl_hairpin_get_peer_ports,
};
static int
--
2.26.2
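An application-side sketch of reaching this op through the generic ethdev API (assumed usage, not from the patch):

#include <stdio.h>
#include <rte_ethdev.h>

static int
example_show_tx_peers(uint16_t port_id)
{
	uint16_t peers[RTE_MAX_ETHPORTS];
	int ret, i;

	/* direction > 0: query the Rx peer port of each hairpin Tx queue. */
	ret = rte_eth_hairpin_get_peer_ports(port_id, peers, RTE_DIM(peers), 1);
	if (ret < 0)
		return ret;	/* -ENOTSUP when the port has no hairpin queues */

	for (i = 0; i < ret; i++)
		printf("hairpin Tx queue %d is paired with Rx port %u\n", i, peers[i]);

	return 0;
}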
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v9 13/14] net/cpfl: support hairpin bind/unbind
2023-06-05 9:06 ` [PATCH v9 00/14] net/cpfl: add hairpin queue support beilei.xing
` (11 preceding siblings ...)
2023-06-05 9:06 ` [PATCH v9 12/14] net/cpfl: support peer ports get beilei.xing
@ 2023-06-05 9:06 ` beilei.xing
2023-06-05 9:06 ` [PATCH v9 14/14] doc: update the doc of CPFL PMD beilei.xing
2023-06-06 10:03 ` [PATCH v10 00/14] net/cpfl: add hairpin queue support beilei.xing
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 9:06 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports hairpin_bind/unbind ops.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 137 +++++++++++++++++++++++++++++++++
1 file changed, 137 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 49d1b8b58b..ac97622a15 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1120,6 +1120,141 @@ cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
return j;
}
+static int
+cpfl_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct idpf_vport *tx_vport = &cpfl_tx_vport->base;
+ struct cpfl_vport *cpfl_rx_vport;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct rte_eth_dev *peer_dev;
+ struct idpf_vport *rx_vport;
+ int err = 0;
+ int i;
+
+ err = cpfl_txq_hairpin_info_update(dev, rx_port);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info.");
+ return err;
+ }
+
+ /* configure hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ err = cpfl_hairpin_txq_config(tx_vport, cpfl_txq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+ return err;
+ }
+ }
+
+ err = cpfl_hairpin_tx_complq_config(cpfl_tx_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
+ return err;
+ }
+
+ peer_dev = &rte_eth_devices[rx_port];
+ cpfl_rx_vport = (struct cpfl_vport *)peer_dev->data->dev_private;
+ rx_vport = &cpfl_rx_vport->base;
+ cpfl_rxq_hairpin_mz_bind(peer_dev);
+
+ err = cpfl_hairpin_rx_bufq_config(cpfl_rx_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
+ return err;
+ }
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ err = cpfl_hairpin_rxq_config(rx_vport, cpfl_rxq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
+ return err;
+ }
+ err = cpfl_rx_queue_init(peer_dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+ return err;
+ }
+ }
+
+ /* enable hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+ i - cpfl_tx_vport->nb_data_txq,
+ false, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on",
+ i);
+ return err;
+ }
+ cpfl_txq->base.q_started = true;
+ }
+
+ err = cpfl_switch_hairpin_complq(cpfl_tx_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq");
+ return err;
+ }
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+ i - cpfl_rx_vport->nb_data_rxq,
+ true, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on",
+ i);
+ }
+ cpfl_rxq->base.q_started = true;
+ }
+
+ err = cpfl_switch_hairpin_bufq(cpfl_rx_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Rx buffer queue");
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+cpfl_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+ struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i;
+
+ /* disable hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+ i - cpfl_tx_vport->nb_data_txq,
+ false, false);
+ cpfl_txq->base.q_started = false;
+ }
+
+ cpfl_switch_hairpin_complq(cpfl_tx_vport, false);
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+ i - cpfl_rx_vport->nb_data_rxq,
+ true, false);
+ cpfl_rxq->base.q_started = false;
+ }
+
+ cpfl_switch_hairpin_bufq(cpfl_rx_vport, false);
+
+ return 0;
+}
+
static const struct eth_dev_ops cpfl_eth_dev_ops = {
.dev_configure = cpfl_dev_configure,
.dev_close = cpfl_dev_close,
@@ -1150,6 +1285,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
.hairpin_get_peer_ports = cpfl_hairpin_get_peer_ports,
+ .hairpin_bind = cpfl_hairpin_bind,
+ .hairpin_unbind = cpfl_hairpin_unbind,
};
static int
--
2.26.2
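An application-side sketch of manual binding for single-port hairpin (assumed usage; the hairpin queues must have been set up with conf.manual_bind = 1):

#include <rte_ethdev.h>

static int
example_manual_bind(uint16_t port_id)
{
	int ret;

	ret = rte_eth_dev_start(port_id);	/* hairpin queues stay down */
	if (ret != 0)
		return ret;

	/* Tx port and Rx port are the same device for single-port hairpin. */
	ret = rte_eth_hairpin_bind(port_id, port_id);
	if (ret != 0)
		return ret;

	/* ... run traffic ... */

	return rte_eth_hairpin_unbind(port_id, port_id);
}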
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v9 14/14] doc: update the doc of CPFL PMD
2023-06-05 9:06 ` [PATCH v9 00/14] net/cpfl: add hairpin queue support beilei.xing
` (12 preceding siblings ...)
2023-06-05 9:06 ` [PATCH v9 13/14] net/cpfl: support hairpin bind/unbind beilei.xing
@ 2023-06-05 9:06 ` beilei.xing
2023-06-06 10:03 ` [PATCH v10 00/14] net/cpfl: add hairpin queue support beilei.xing
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-05 9:06 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
Update cpfl.rst to clarify hairpin support.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
doc/guides/nics/cpfl.rst | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index d25db088eb..8d5c3082e4 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -106,3 +106,10 @@ The paths are chosen based on 2 conditions:
A value "P" means the offload feature is not supported by vector path.
If any not supported features are used, cpfl vector PMD is disabled
and the scalar paths are chosen.
+
+Hairpin queue
+~~~~~~~~~~~~~
+
+ E2100 Series can loop back packets from RX port to TX port; this feature is
+ called port-to-port or hairpin.
+ Currently, the PMD only supports single-port hairpin.
--
2.26.2
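A possible application-side setup for the single-port hairpin described above (a sketch against the generic rte_ethdev hairpin API; queue ids and descriptor count are placeholders):

#include <rte_ethdev.h>

static int
example_setup_hairpin_pair(uint16_t port_id, uint16_t rxq, uint16_t txq, uint16_t nb_desc)
{
	struct rte_eth_hairpin_conf conf = { .peer_count = 1 };
	int ret;

	conf.manual_bind = 0;	/* let rte_eth_dev_start() bring the queues up */
	conf.tx_explicit = 0;

	conf.peers[0].port = port_id;	/* single-port hairpin: the peer is this port */
	conf.peers[0].queue = txq;
	ret = rte_eth_rx_hairpin_queue_setup(port_id, rxq, nb_desc, &conf);
	if (ret != 0)
		return ret;

	conf.peers[0].queue = rxq;
	return rte_eth_tx_hairpin_queue_setup(port_id, txq, nb_desc, &conf);
}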
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH v8 12/14] net/cpfl: support peer ports get
2023-06-05 6:17 ` [PATCH v8 12/14] net/cpfl: support peer ports get beilei.xing
@ 2023-06-05 11:22 ` Wu, Jingjing
0 siblings, 0 replies; 164+ messages in thread
From: Wu, Jingjing @ 2023-06-05 11:22 UTC (permalink / raw)
To: Xing, Beilei; +Cc: dev, Liu, Mingxia, Wang, Xiao W
> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Monday, June 5, 2023 2:17 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>; Wang, Xiao W <xiao.w.wang@intel.com>
> Subject: [PATCH v8 12/14] net/cpfl: support peer ports get
>
> From: Beilei Xing <beilei.xing@intel.com>
>
> This patch supports get hairpin peer ports.
>
> Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> Signed-off-by: Beilei Xing <beilei.xing@intel.com>
> ---
> drivers/net/cpfl/cpfl_ethdev.c | 41 ++++++++++++++++++++++++++++++++++
> 1 file changed, 41 insertions(+)
>
> diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
> index 850f1c0bc6..1a1ca4bc77 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.c
> +++ b/drivers/net/cpfl/cpfl_ethdev.c
> @@ -1080,6 +1080,46 @@ cpfl_dev_close(struct rte_eth_dev *dev)
> return 0;
> }
>
> +static int
> +cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
> + size_t len, uint32_t tx)
> +{
> + struct cpfl_vport *cpfl_vport =
> + (struct cpfl_vport *)dev->data->dev_private;
> + struct idpf_tx_queue *txq;
> + struct idpf_rx_queue *rxq;
> + struct cpfl_tx_queue *cpfl_txq;
> + struct cpfl_rx_queue *cpfl_rxq;
> + int i;
> + int j = 0;
> +
> + if (len <= 0)
> + return -EINVAL;
> +
> + if (cpfl_vport->p2p_q_chunks_info == NULL)
> + return -ENOTSUP;
> +
> + if (tx > 0) {
> + for (i = cpfl_vport->nb_data_txq, j = 0; i < dev->data->nb_tx_queues; i++,
> j++) {
> + txq = dev->data->tx_queues[i];
> + if (txq == NULL)
> + return -EINVAL;
> + cpfl_txq = (struct cpfl_tx_queue *)txq;
> + peer_ports[j] = cpfl_txq->hairpin_info.peer_rxp;
Shouldn't access the peer_ports[j] if j >= len.
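One way to address this (a sketch only, in line with the out-of-bounds fix noted in the v10 changelog) is to bound the fill loop on len as well:

	for (i = cpfl_vport->nb_data_txq, j = 0;
	     i < dev->data->nb_tx_queues && j < (int)len; i++, j++) {
		txq = dev->data->tx_queues[i];
		if (txq == NULL)
			return -EINVAL;
		cpfl_txq = (struct cpfl_tx_queue *)txq;
		peer_ports[j] = cpfl_txq->hairpin_info.peer_rxp;
	}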
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH v10 00/14] net/cpfl: add hairpin queue support
2023-06-06 10:03 ` [PATCH v10 00/14] net/cpfl: add hairpin queue support beilei.xing
@ 2023-06-06 6:40 ` Wu, Jingjing
2023-06-07 7:16 ` Zhang, Qi Z
2023-06-06 10:03 ` [PATCH v10 01/14] net/cpfl: refine structures beilei.xing
` (13 subsequent siblings)
14 siblings, 1 reply; 164+ messages in thread
From: Wu, Jingjing @ 2023-06-06 6:40 UTC (permalink / raw)
To: Xing, Beilei; +Cc: dev, Liu, Mingxia
> -----Original Message-----
> From: Xing, Beilei <beilei.xing@intel.com>
> Sent: Tuesday, June 6, 2023 6:03 PM
> To: Wu, Jingjing <jingjing.wu@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> <beilei.xing@intel.com>
> Subject: [PATCH v10 00/14] net/cpfl: add hairpin queue support
>
> From: Beilei Xing <beilei.xing@intel.com>
>
> This patchset adds hairpin queue support.
>
> v2 changes:
> - change hairpin rx queues configuration sequence.
> - code refine.
>
> v3 changes:
> - Refine the patchset based on the latest code.
>
> v4 change:
> - Remove hairpin rx buffer queue's sw_ring.
> - Change hairpin rx queues configuration sequence in cpfl_hairpin_bind function.
> - Refine hairpin queue setup and release.
>
> v5 change:
> - Fix memory leak during queue setup.
> - Refine hairpin Rxq/Txq start/stop.
>
> v6 change:
> - Add sign-off.
>
> v7 change:
> - Update cpfl.rst
>
> v8 change:
> - Fix Intel-compilation failure.
>
> v9 change:
> - Fix memory leak if fail to init queue group.
> - Change log level.
>
> v10 change:
> - Avoid accessing out-of-bounds.
>
> Beilei Xing (14):
> net/cpfl: refine structures
> common/idpf: support queue groups add/delete
> net/cpfl: add hairpin queue group during vport init
> net/cpfl: support hairpin queue capability get
> net/cpfl: support hairpin queue setup and release
> common/idpf: add queue config API
> net/cpfl: support hairpin queue configuration
> common/idpf: add switch queue API
> net/cpfl: support hairpin queue start/stop
> common/idpf: add irq map config API
> net/cpfl: enable write back based on ITR expire
> net/cpfl: support peer ports get
> net/cpfl: support hairpin bind/unbind
> doc: update the doc of CPFL PMD
>
> doc/guides/nics/cpfl.rst | 7 +
> drivers/common/idpf/idpf_common_device.c | 75 ++
> drivers/common/idpf/idpf_common_device.h | 4 +
> drivers/common/idpf/idpf_common_virtchnl.c | 138 +++-
> drivers/common/idpf/idpf_common_virtchnl.h | 18 +
> drivers/common/idpf/version.map | 6 +
> drivers/net/cpfl/cpfl_ethdev.c | 613 ++++++++++++++--
> drivers/net/cpfl/cpfl_ethdev.h | 35 +-
> drivers/net/cpfl/cpfl_rxtx.c | 789 +++++++++++++++++++--
> drivers/net/cpfl/cpfl_rxtx.h | 76 ++
> drivers/net/cpfl/cpfl_rxtx_vec_common.h | 21 +-
> 11 files changed, 1663 insertions(+), 119 deletions(-)
>
> --
> 2.26.2
Acked-by: Jingjing Wu <jingjing.wu@intel.com>
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v10 00/14] net/cpfl: add hairpin queue support
2023-06-05 9:06 ` [PATCH v9 00/14] net/cpfl: add hairpin queue support beilei.xing
` (13 preceding siblings ...)
2023-06-05 9:06 ` [PATCH v9 14/14] doc: update the doc of CPFL PMD beilei.xing
@ 2023-06-06 10:03 ` beilei.xing
2023-06-06 6:40 ` Wu, Jingjing
` (14 more replies)
14 siblings, 15 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-06 10:03 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patchset adds hairpin queue support.
v2 changes:
- change hairpin rx queues configuration sequence.
- code refine.
v3 changes:
- Refine the patchset based on the latest code.
v4 change:
- Remove hairpin rx buffer queue's sw_ring.
- Change hairpin rx queues configuration sequence in cpfl_hairpin_bind function.
- Refine hairpin queue setup and release.
v5 change:
- Fix memory leak during queue setup.
- Refine hairpin Rxq/Txq start/stop.
v6 change:
- Add sign-off.
v7 change:
- Update cpfl.rst
v8 change:
- Fix Intel-compilation failure.
v9 change:
- Fix memory leak if fail to init queue group.
- Change log level.
v10 change:
- Avoid accessing out-of-bounds.
Beilei Xing (14):
net/cpfl: refine structures
common/idpf: support queue groups add/delete
net/cpfl: add hairpin queue group during vport init
net/cpfl: support hairpin queue capability get
net/cpfl: support hairpin queue setup and release
common/idpf: add queue config API
net/cpfl: support hairpin queue configuration
common/idpf: add switch queue API
net/cpfl: support hairpin queue start/stop
common/idpf: add irq map config API
net/cpfl: enable write back based on ITR expire
net/cpfl: support peer ports get
net/cpfl: support hairpin bind/unbind
doc: update the doc of CPFL PMD
doc/guides/nics/cpfl.rst | 7 +
drivers/common/idpf/idpf_common_device.c | 75 ++
drivers/common/idpf/idpf_common_device.h | 4 +
drivers/common/idpf/idpf_common_virtchnl.c | 138 +++-
drivers/common/idpf/idpf_common_virtchnl.h | 18 +
drivers/common/idpf/version.map | 6 +
drivers/net/cpfl/cpfl_ethdev.c | 613 ++++++++++++++--
drivers/net/cpfl/cpfl_ethdev.h | 35 +-
drivers/net/cpfl/cpfl_rxtx.c | 789 +++++++++++++++++++--
drivers/net/cpfl/cpfl_rxtx.h | 76 ++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 21 +-
11 files changed, 1663 insertions(+), 119 deletions(-)
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v10 01/14] net/cpfl: refine structures
2023-06-06 10:03 ` [PATCH v10 00/14] net/cpfl: add hairpin queue support beilei.xing
2023-06-06 6:40 ` Wu, Jingjing
@ 2023-06-06 10:03 ` beilei.xing
2023-06-06 10:03 ` [PATCH v10 02/14] common/idpf: support queue groups add/delete beilei.xing
` (12 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-06 10:03 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch refines some structures to support hairpin queues:
cpfl_rx_queue, cpfl_tx_queue and cpfl_vport.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 85 +++++++-----
drivers/net/cpfl/cpfl_ethdev.h | 6 +-
drivers/net/cpfl/cpfl_rxtx.c | 175 +++++++++++++++++-------
drivers/net/cpfl/cpfl_rxtx.h | 8 ++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 17 +--
5 files changed, 196 insertions(+), 95 deletions(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 7528a14d05..e587155db6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -124,7 +124,8 @@ static int
cpfl_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct rte_eth_link new_link;
unsigned int i;
@@ -156,7 +157,8 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
static int
cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
dev_info->max_rx_queues = base->caps.max_rx_q;
@@ -216,7 +218,8 @@ cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
static int
cpfl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
/* mtu setting is forbidden if port is start */
if (dev->data->dev_started) {
@@ -256,12 +259,12 @@ static uint64_t
cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
{
uint64_t mbuf_alloc_failed = 0;
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i = 0;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- mbuf_alloc_failed += __atomic_load_n(&rxq->rx_stats.mbuf_alloc_failed,
+ cpfl_rxq = dev->data->rx_queues[i];
+ mbuf_alloc_failed += __atomic_load_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,
__ATOMIC_RELAXED);
}
@@ -271,8 +274,8 @@ cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
static int
cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
int ret;
@@ -305,20 +308,20 @@ cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
static void
cpfl_reset_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- __atomic_store_n(&rxq->rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
+ cpfl_rxq = dev->data->rx_queues[i];
+ __atomic_store_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0, __ATOMIC_RELAXED);
}
}
static int
cpfl_dev_stats_reset(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
int ret;
@@ -343,8 +346,8 @@ static int cpfl_dev_xstats_reset(struct rte_eth_dev *dev)
static int cpfl_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned int n)
{
- struct idpf_vport *vport =
- (struct idpf_vport *)dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
unsigned int i;
int ret;
@@ -459,7 +462,8 @@ cpfl_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t idx, shift;
int ret = 0;
@@ -498,7 +502,8 @@ cpfl_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t idx, shift;
int ret = 0;
@@ -536,7 +541,8 @@ static int
cpfl_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
int ret = 0;
@@ -601,7 +607,8 @@ static int
cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
int ret = 0;
@@ -638,7 +645,8 @@ cpfl_rss_hash_conf_get(struct rte_eth_dev *dev,
static int
cpfl_dev_configure(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct rte_eth_conf *conf = &dev->data->dev_conf;
struct idpf_adapter *base = vport->adapter;
int ret;
@@ -710,7 +718,8 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
static int
cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
uint16_t nb_rx_queues = dev->data->nb_rx_queues;
return idpf_vport_irq_map_config(vport, nb_rx_queues);
@@ -719,14 +728,14 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
static int
cpfl_start_queues(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
- struct idpf_tx_queue *txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
int err = 0;
int i;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL || txq->tx_deferred_start)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
continue;
err = cpfl_tx_queue_start(dev, i);
if (err != 0) {
@@ -736,8 +745,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- if (rxq == NULL || rxq->rx_deferred_start)
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
continue;
err = cpfl_rx_queue_start(dev, i);
if (err != 0) {
@@ -752,7 +761,8 @@ cpfl_start_queues(struct rte_eth_dev *dev)
static int
cpfl_dev_start(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(base);
uint16_t num_allocated_vectors = base->caps.num_allocated_vectors;
@@ -813,7 +823,8 @@ cpfl_dev_start(struct rte_eth_dev *dev)
static int
cpfl_dev_stop(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
if (dev->data->dev_started == 0)
return 0;
@@ -832,7 +843,8 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
static int
cpfl_dev_close(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
@@ -842,7 +854,7 @@ cpfl_dev_close(struct rte_eth_dev *dev)
adapter->cur_vport_nb--;
dev->data->dev_private = NULL;
adapter->vports[vport->sw_idx] = NULL;
- rte_free(vport);
+ rte_free(cpfl_vport);
return 0;
}
@@ -1047,7 +1059,7 @@ cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id)
int i;
for (i = 0; i < adapter->cur_vport_nb; i++) {
- vport = adapter->vports[i];
+ vport = &adapter->vports[i]->base;
if (vport->vport_id != vport_id)
continue;
else
@@ -1275,7 +1287,8 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter)
static int
cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_vport_param *param = init_params;
struct cpfl_adapter_ext *adapter = param->adapter;
/* for sending create vport virtchnl msg prepare */
@@ -1300,7 +1313,7 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
goto err;
}
- adapter->vports[param->idx] = vport;
+ adapter->vports[param->idx] = cpfl_vport;
adapter->cur_vports |= RTE_BIT32(param->devarg_id);
adapter->cur_vport_nb++;
@@ -1415,7 +1428,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
snprintf(name, sizeof(name), "cpfl_%s_vport_0",
pci_dev->device.name);
retval = rte_eth_dev_create(&pci_dev->device, name,
- sizeof(struct idpf_vport),
+ sizeof(struct cpfl_vport),
NULL, NULL, cpfl_dev_vport_init,
&vport_param);
if (retval != 0)
@@ -1433,7 +1446,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
pci_dev->device.name,
devargs.req_vports[i]);
retval = rte_eth_dev_create(&pci_dev->device, name,
- sizeof(struct idpf_vport),
+ sizeof(struct cpfl_vport),
NULL, NULL, cpfl_dev_vport_init,
&vport_param);
if (retval != 0)
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 200dfcac02..81fe9ac4c3 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -69,13 +69,17 @@ struct cpfl_devargs {
uint16_t req_vport_nb;
};
+struct cpfl_vport {
+ struct idpf_vport base;
+};
+
struct cpfl_adapter_ext {
TAILQ_ENTRY(cpfl_adapter_ext) next;
struct idpf_adapter base;
char name[CPFL_ADAPTER_NAME_LEN];
- struct idpf_vport **vports;
+ struct cpfl_vport **vports;
uint16_t max_vport_nb;
uint16_t cur_vports; /* bit mask of created vport */
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 75021c3c54..04a51b8d15 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -128,7 +128,8 @@ cpfl_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,
uint16_t nb_desc, unsigned int socket_id,
struct rte_mempool *mp, uint8_t bufq_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct idpf_hw *hw = &base->hw;
const struct rte_memzone *mz;
@@ -220,15 +221,69 @@ cpfl_rx_split_bufq_release(struct idpf_rx_queue *bufq)
rte_free(bufq);
}
+static void
+cpfl_rx_queue_release(void *rxq)
+{
+ struct cpfl_rx_queue *cpfl_rxq = rxq;
+ struct idpf_rx_queue *q = NULL;
+
+ if (cpfl_rxq == NULL)
+ return;
+
+ q = &cpfl_rxq->base;
+
+ /* Split queue */
+ if (!q->adapter->is_rx_singleq) {
+ if (q->bufq2)
+ cpfl_rx_split_bufq_release(q->bufq2);
+
+ if (q->bufq1)
+ cpfl_rx_split_bufq_release(q->bufq1);
+
+ rte_free(cpfl_rxq);
+ return;
+ }
+
+ /* Single queue */
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(cpfl_rxq);
+}
+
+static void
+cpfl_tx_queue_release(void *txq)
+{
+ struct cpfl_tx_queue *cpfl_txq = txq;
+ struct idpf_tx_queue *q = NULL;
+
+ if (cpfl_txq == NULL)
+ return;
+
+ q = &cpfl_txq->base;
+
+ if (q->complq) {
+ rte_memzone_free(q->complq->mz);
+ rte_free(q->complq);
+ }
+
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(cpfl_txq);
+}
+
int
cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
struct idpf_hw *hw = &base->hw;
+ struct cpfl_rx_queue *cpfl_rxq;
const struct rte_memzone *mz;
struct idpf_rx_queue *rxq;
uint16_t rx_free_thresh;
@@ -248,21 +303,23 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Free memory if needed */
if (dev->data->rx_queues[queue_idx] != NULL) {
- idpf_qc_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
dev->data->rx_queues[queue_idx] = NULL;
}
/* Setup Rx queue */
- rxq = rte_zmalloc_socket("cpfl rxq",
- sizeof(struct idpf_rx_queue),
+ cpfl_rxq = rte_zmalloc_socket("cpfl rxq",
+ sizeof(struct cpfl_rx_queue),
RTE_CACHE_LINE_SIZE,
socket_id);
- if (rxq == NULL) {
+ if (cpfl_rxq == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
ret = -ENOMEM;
goto err_rxq_alloc;
}
+ rxq = &cpfl_rxq->base;
+
is_splitq = !!(vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
rxq->mp = mp;
@@ -329,7 +386,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
rxq->q_set = true;
- dev->data->rx_queues[queue_idx] = rxq;
+ dev->data->rx_queues[queue_idx] = cpfl_rxq;
return 0;
@@ -349,7 +406,8 @@ cpfl_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq,
uint16_t queue_idx, uint16_t nb_desc,
unsigned int socket_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
const struct rte_memzone *mz;
struct idpf_tx_queue *cq;
int ret;
@@ -397,9 +455,11 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uint16_t tx_rs_thresh, tx_free_thresh;
+ struct cpfl_tx_queue *cpfl_txq;
struct idpf_hw *hw = &base->hw;
const struct rte_memzone *mz;
struct idpf_tx_queue *txq;
@@ -419,21 +479,23 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Free memory if needed. */
if (dev->data->tx_queues[queue_idx] != NULL) {
- idpf_qc_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
dev->data->tx_queues[queue_idx] = NULL;
}
/* Allocate the TX queue data structure. */
- txq = rte_zmalloc_socket("cpfl txq",
- sizeof(struct idpf_tx_queue),
+ cpfl_txq = rte_zmalloc_socket("cpfl txq",
+ sizeof(struct cpfl_tx_queue),
RTE_CACHE_LINE_SIZE,
socket_id);
- if (txq == NULL) {
+ if (cpfl_txq == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
ret = -ENOMEM;
goto err_txq_alloc;
}
+ txq = &cpfl_txq->base;
+
is_splitq = !!(vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
txq->nb_tx_desc = nb_desc;
@@ -487,7 +549,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
queue_idx * vport->chunks_info.tx_qtail_spacing);
txq->ops = &def_txq_ops;
txq->q_set = true;
- dev->data->tx_queues[queue_idx] = txq;
+ dev->data->tx_queues[queue_idx] = cpfl_txq;
return 0;
@@ -503,6 +565,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
+ struct cpfl_rx_queue *cpfl_rxq;
struct idpf_rx_queue *rxq;
uint16_t max_pkt_len;
uint32_t frame_size;
@@ -511,7 +574,8 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
if (rx_queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
- rxq = dev->data->rx_queues[rx_queue_id];
+ cpfl_rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = &cpfl_rxq->base;
if (rxq == NULL || !rxq->q_set) {
PMD_DRV_LOG(ERR, "RX queue %u not available or setup",
@@ -575,9 +639,10 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_rx_queue *rxq =
- dev->data->rx_queues[rx_queue_id];
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq = dev->data->rx_queues[rx_queue_id];
+ struct idpf_rx_queue *rxq = &cpfl_rxq->base;
int err = 0;
err = idpf_vc_rxq_config(vport, rxq);
@@ -610,15 +675,15 @@ cpfl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
if (tx_queue_id >= dev->data->nb_tx_queues)
return -EINVAL;
- txq = dev->data->tx_queues[tx_queue_id];
+ cpfl_txq = dev->data->tx_queues[tx_queue_id];
/* Init the RX tail register. */
- IDPF_PCI_REG_WRITE(txq->qtx_tail, 0);
+ IDPF_PCI_REG_WRITE(cpfl_txq->base.qtx_tail, 0);
return 0;
}
@@ -626,12 +691,13 @@ cpfl_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
int
cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_tx_queue *txq =
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_tx_queue *cpfl_txq =
dev->data->tx_queues[tx_queue_id];
int err = 0;
- err = idpf_vc_txq_config(vport, txq);
+ err = idpf_vc_txq_config(vport, &cpfl_txq->base);
if (err != 0) {
PMD_DRV_LOG(ERR, "Fail to configure Tx queue %u", tx_queue_id);
return err;
@@ -650,7 +716,7 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
tx_queue_id);
} else {
- txq->q_started = true;
+ cpfl_txq->base.q_started = true;
dev->data->tx_queue_state[tx_queue_id] =
RTE_ETH_QUEUE_STATE_STARTED;
}
@@ -661,13 +727,16 @@ cpfl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
int
cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq;
struct idpf_rx_queue *rxq;
int err;
if (rx_queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
+ cpfl_rxq = dev->data->rx_queues[rx_queue_id];
err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
@@ -675,7 +744,7 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return err;
}
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = &cpfl_rxq->base;
rxq->q_started = false;
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
rxq->ops->release_mbufs(rxq);
@@ -693,13 +762,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_tx_queue *cpfl_txq;
struct idpf_tx_queue *txq;
int err;
if (tx_queue_id >= dev->data->nb_tx_queues)
return -EINVAL;
+ cpfl_txq = dev->data->tx_queues[tx_queue_id];
+
err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
@@ -707,7 +780,7 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
return err;
}
- txq = dev->data->tx_queues[tx_queue_id];
+ txq = &cpfl_txq->base;
txq->q_started = false;
txq->ops->release_mbufs(txq);
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
@@ -724,25 +797,25 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
void
cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- idpf_qc_rx_queue_release(dev->data->rx_queues[qid]);
+ cpfl_rx_queue_release(dev->data->rx_queues[qid]);
}
void
cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- idpf_qc_tx_queue_release(dev->data->tx_queues[qid]);
+ cpfl_tx_queue_release(dev->data->tx_queues[qid]);
}
void
cpfl_stop_queues(struct rte_eth_dev *dev)
{
- struct idpf_rx_queue *rxq;
- struct idpf_tx_queue *txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
int i;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- if (rxq == NULL)
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq == NULL)
continue;
if (cpfl_rx_queue_stop(dev, i) != 0)
@@ -750,8 +823,8 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL)
continue;
if (cpfl_tx_queue_stop(dev, i) != 0)
@@ -762,9 +835,10 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
void
cpfl_set_rx_function(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
#ifdef RTE_ARCH_X86
- struct idpf_rx_queue *rxq;
+ struct cpfl_rx_queue *cpfl_rxq;
int i;
if (cpfl_rx_vec_dev_check_default(dev) == CPFL_VECTOR_PATH &&
@@ -790,8 +864,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- (void)idpf_qc_splitq_rx_vec_setup(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ (void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
if (vport->rx_use_avx512) {
@@ -810,8 +884,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
} else {
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- (void)idpf_qc_singleq_rx_vec_setup(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ (void)idpf_qc_singleq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
if (vport->rx_use_avx512) {
@@ -860,10 +934,11 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
void
cpfl_set_tx_function(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
#ifdef RTE_ARCH_X86
#ifdef CC_AVX512_SUPPORT
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
int i;
#endif /* CC_AVX512_SUPPORT */
@@ -878,8 +953,8 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
vport->tx_use_avx512 = true;
if (vport->tx_use_avx512) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- idpf_qc_tx_vec_avx512_setup(txq);
+ cpfl_txq = dev->data->tx_queues[i];
+ idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
}
}
}
@@ -916,10 +991,10 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)
#ifdef CC_AVX512_SUPPORT
if (vport->tx_use_avx512) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- if (txq == NULL)
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq == NULL)
continue;
- idpf_qc_tx_vec_avx512_setup(txq);
+ idpf_qc_tx_vec_avx512_setup(&cpfl_txq->base);
}
PMD_DRV_LOG(NOTICE,
"Using Single AVX512 Vector Tx (port %d).",
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index fb267d38c8..bfb9ad97bd 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -23,6 +23,14 @@
#define CPFL_SUPPORT_CHAIN_NUM 5
+struct cpfl_rx_queue {
+ struct idpf_rx_queue base;
+};
+
+struct cpfl_tx_queue {
+ struct idpf_tx_queue base;
+};
+
int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
index 665418d27d..5690b17911 100644
--- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
+++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
@@ -76,15 +76,16 @@ cpfl_rx_splitq_vec_default(struct idpf_rx_queue *rxq)
static inline int
cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)
{
- struct idpf_vport *vport = dev->data->dev_private;
- struct idpf_rx_queue *rxq;
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct cpfl_rx_queue *cpfl_rxq;
int i, default_ret, splitq_ret, ret = CPFL_SCALAR_PATH;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- default_ret = cpfl_rx_vec_queue_default(rxq);
+ cpfl_rxq = dev->data->rx_queues[i];
+ default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
- splitq_ret = cpfl_rx_splitq_vec_default(rxq);
+ splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base);
ret = splitq_ret && default_ret;
} else {
ret = default_ret;
@@ -100,12 +101,12 @@ static inline int
cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)
{
int i;
- struct idpf_tx_queue *txq;
+ struct cpfl_tx_queue *cpfl_txq;
int ret = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txq = dev->data->tx_queues[i];
- ret = cpfl_tx_vec_queue_default(txq);
+ cpfl_txq = dev->data->tx_queues[i];
+ ret = cpfl_tx_vec_queue_default(&cpfl_txq->base);
if (ret == CPFL_SCALAR_PATH)
return CPFL_SCALAR_PATH;
}
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v10 02/14] common/idpf: support queue groups add/delete
2023-06-06 10:03 ` [PATCH v10 00/14] net/cpfl: add hairpin queue support beilei.xing
2023-06-06 6:40 ` Wu, Jingjing
2023-06-06 10:03 ` [PATCH v10 01/14] net/cpfl: refine structures beilei.xing
@ 2023-06-06 10:03 ` beilei.xing
2023-06-06 10:03 ` [PATCH v10 03/14] net/cpfl: add hairpin queue group during vport init beilei.xing
` (11 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-06 10:03 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds queue group add/delete virtual channel support.
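Below is a minimal caller-side sketch (illustrative only, not part of this
patch), assuming a vport has already been created; the example_ helper name
and the group id/type values are placeholders chosen by the caller:

    /* Illustrative sketch: delete one previously added queue group. */
    static int
    example_queue_group_del(struct idpf_vport *vport,
                            uint16_t group_id, uint16_t group_type)
    {
            struct virtchnl2_queue_group_id qg_id;

            memset(&qg_id, 0, sizeof(qg_id));
            qg_id.queue_group_id = group_id;
            qg_id.queue_group_type = group_type;

            /* num_q_grps must match the number of ids passed in. */
            return idpf_vc_queue_grps_del(vport, 1, &qg_id);
    }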
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 66 ++++++++++++++++++++++
drivers/common/idpf/idpf_common_virtchnl.h | 9 +++
drivers/common/idpf/version.map | 2 +
3 files changed, 77 insertions(+)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index b713678634..a3fe55c897 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -359,6 +359,72 @@ idpf_vc_vport_destroy(struct idpf_vport *vport)
return err;
}
+int
+idpf_vc_queue_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
+ uint8_t *p2p_queue_grps_out)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_cmd_info args;
+ int size, qg_info_size;
+ int err = -1;
+
+ size = sizeof(*p2p_queue_grps_info) +
+ (p2p_queue_grps_info->qg_info.num_queue_groups - 1) *
+ sizeof(struct virtchnl2_queue_group_info);
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_ADD_QUEUE_GROUPS;
+ args.in_args = (uint8_t *)p2p_queue_grps_info;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ if (err != 0) {
+ DRV_LOG(ERR,
+ "Failed to execute command of VIRTCHNL2_OP_ADD_QUEUE_GROUPS");
+ return err;
+ }
+
+ rte_memcpy(p2p_queue_grps_out, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
+ return 0;
+}
+
+int idpf_vc_queue_grps_del(struct idpf_vport *vport,
+ uint16_t num_q_grps,
+ struct virtchnl2_queue_group_id *qg_ids)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_delete_queue_groups *vc_del_q_grps;
+ struct idpf_cmd_info args;
+ int size;
+ int err;
+
+ size = sizeof(*vc_del_q_grps) +
+ (num_q_grps - 1) * sizeof(struct virtchnl2_queue_group_id);
+ vc_del_q_grps = rte_zmalloc("vc_del_q_grps", size, 0);
+
+ vc_del_q_grps->vport_id = vport->vport_id;
+ vc_del_q_grps->num_queue_groups = num_q_grps;
+ memcpy(vc_del_q_grps->qg_ids, qg_ids,
+ num_q_grps * sizeof(struct virtchnl2_queue_group_id));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_DEL_QUEUE_GROUPS;
+ args.in_args = (uint8_t *)vc_del_q_grps;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_DEL_QUEUE_GROUPS");
+
+ rte_free(vc_del_q_grps);
+ return err;
+}
+
int
idpf_vc_rss_key_set(struct idpf_vport *vport)
{
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index c45295290e..58b16e1c5d 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -64,4 +64,13 @@ int idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
__rte_internal
int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 *buff_count, struct idpf_dma_mem **buffs);
+__rte_internal
+int idpf_vc_queue_grps_del(struct idpf_vport *vport,
+ uint16_t num_q_grps,
+ struct virtchnl2_queue_group_id *qg_ids);
+__rte_internal
+int
+idpf_vc_queue_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *ptp_queue_grps_info,
+ uint8_t *ptp_queue_grps_out);
#endif /* _IDPF_COMMON_VIRTCHNL_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 70334a1b03..01d18f3f3f 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -43,6 +43,8 @@ INTERNAL {
idpf_vc_irq_map_unmap_config;
idpf_vc_one_msg_read;
idpf_vc_ptype_info_query;
+ idpf_vc_queue_grps_add;
+ idpf_vc_queue_grps_del;
idpf_vc_queue_switch;
idpf_vc_queues_ena_dis;
idpf_vc_rss_hash_get;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v10 03/14] net/cpfl: add hairpin queue group during vport init
2023-06-06 10:03 ` [PATCH v10 00/14] net/cpfl: add hairpin queue support beilei.xing
` (2 preceding siblings ...)
2023-06-06 10:03 ` [PATCH v10 02/14] common/idpf: support queue groups add/delete beilei.xing
@ 2023-06-06 10:03 ` beilei.xing
2023-06-06 10:03 ` [PATCH v10 04/14] net/cpfl: support hairpin queue capability get beilei.xing
` (10 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-06 10:03 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds hairpin queue group setup during vport init.
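For reference, a short sketch (illustrative only, not part of this patch) of
how the chunk info recorded here is consumed later: the absolute hardware
queue id of the Nth hairpin Rx queue is the recorded start id plus the
logical offset (the example_ helper name is a placeholder):

    /* Illustrative sketch: map a logical hairpin Rx queue to its HW qid. */
    static inline uint32_t
    example_p2p_rx_hw_qid(struct cpfl_vport *cpfl_vport, uint16_t logic_qid)
    {
            return cpfl_vport->p2p_q_chunks_info->rx_start_qid + logic_qid;
    }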
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 135 +++++++++++++++++++++++++++++++++
drivers/net/cpfl/cpfl_ethdev.h | 18 +++++
drivers/net/cpfl/cpfl_rxtx.h | 7 ++
3 files changed, 160 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index e587155db6..29d0375ecd 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -840,6 +840,21 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
return 0;
}
+static int
+cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
+{
+ struct virtchnl2_queue_group_id qg_ids;
+ int ret = 0;
+
+ memset(&qg_ids, 0, sizeof(qg_ids));
+ qg_ids.queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
+ qg_ids.queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
+ ret = idpf_vc_queue_grps_del(vport, CPFL_P2P_NB_QUEUE_GRPS, &qg_ids);
+ if (ret)
+ PMD_DRV_LOG(ERR, "Failed to delete p2p queue groups");
+ return ret;
+}
+
static int
cpfl_dev_close(struct rte_eth_dev *dev)
{
@@ -848,7 +863,12 @@ cpfl_dev_close(struct rte_eth_dev *dev)
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
+
+ if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
+ cpfl_p2p_queue_grps_del(vport);
+
idpf_vport_deinit(vport);
+ rte_free(cpfl_vport->p2p_q_chunks_info);
adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
adapter->cur_vport_nb--;
@@ -1284,6 +1304,96 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter)
return vport_idx;
}
+static int
+cpfl_p2p_q_grps_add(struct idpf_vport *vport,
+ struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
+ uint8_t *p2p_q_vc_out_info)
+{
+ int ret;
+
+ p2p_queue_grps_info->vport_id = vport->vport_id;
+ p2p_queue_grps_info->qg_info.num_queue_groups = CPFL_P2P_NB_QUEUE_GRPS;
+ p2p_queue_grps_info->qg_info.groups[0].num_rx_q = CPFL_MAX_P2P_NB_QUEUES;
+ p2p_queue_grps_info->qg_info.groups[0].num_rx_bufq = CPFL_P2P_NB_RX_BUFQ;
+ p2p_queue_grps_info->qg_info.groups[0].num_tx_q = CPFL_MAX_P2P_NB_QUEUES;
+ p2p_queue_grps_info->qg_info.groups[0].num_tx_complq = CPFL_P2P_NB_TX_COMPLQ;
+ p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
+ p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
+ p2p_queue_grps_info->qg_info.groups[0].rx_q_grp_info.rss_lut_size = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.tx_tc = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.priority = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.is_sp = 0;
+ p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.pir_weight = 0;
+
+ ret = idpf_vc_queue_grps_add(vport, p2p_queue_grps_info, p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to add p2p queue groups.");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
+ struct virtchnl2_add_queue_groups *p2p_q_vc_out_info)
+{
+ struct p2p_queue_chunks_info *p2p_q_chunks_info = cpfl_vport->p2p_q_chunks_info;
+ struct virtchnl2_queue_reg_chunks *vc_chunks_out;
+ int i, type;
+
+ if (p2p_q_vc_out_info->qg_info.groups[0].qg_id.queue_group_type !=
+ VIRTCHNL2_QUEUE_GROUP_P2P) {
+ PMD_DRV_LOG(ERR, "Add queue group response mismatch.");
+ return -EINVAL;
+ }
+
+ vc_chunks_out = &p2p_q_vc_out_info->qg_info.groups[0].chunks;
+
+ for (i = 0; i < vc_chunks_out->num_chunks; i++) {
+ type = vc_chunks_out->chunks[i].type;
+ switch (type) {
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ p2p_q_chunks_info->tx_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->tx_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->tx_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ p2p_q_chunks_info->rx_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->rx_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->rx_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ p2p_q_chunks_info->tx_compl_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->tx_compl_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->tx_compl_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ p2p_q_chunks_info->rx_buf_start_qid =
+ vc_chunks_out->chunks[i].start_queue_id;
+ p2p_q_chunks_info->rx_buf_qtail_start =
+ vc_chunks_out->chunks[i].qtail_reg_start;
+ p2p_q_chunks_info->rx_buf_qtail_spacing =
+ vc_chunks_out->chunks[i].qtail_reg_spacing;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported queue type");
+ break;
+ }
+ }
+
+ return 0;
+}
+
static int
cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
{
@@ -1293,6 +1403,8 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
struct cpfl_adapter_ext *adapter = param->adapter;
/* for sending create vport virtchnl msg prepare */
struct virtchnl2_create_vport create_vport_info;
+ struct virtchnl2_add_queue_groups p2p_queue_grps_info;
+ uint8_t p2p_q_vc_out_info[IDPF_DFLT_MBX_BUF_SIZE] = {0};
int ret = 0;
dev->dev_ops = &cpfl_eth_dev_ops;
@@ -1327,6 +1439,29 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void *init_params)
rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
&dev->data->mac_addrs[0]);
+ if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq) {
+ memset(&p2p_queue_grps_info, 0, sizeof(p2p_queue_grps_info));
+ ret = cpfl_p2p_q_grps_add(vport, &p2p_queue_grps_info, p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_INIT_LOG(WARNING, "Failed to add p2p queue group.");
+ return 0;
+ }
+ cpfl_vport->p2p_q_chunks_info = rte_zmalloc(NULL,
+ sizeof(struct p2p_queue_chunks_info), 0);
+ if (cpfl_vport->p2p_q_chunks_info == NULL) {
+ PMD_INIT_LOG(WARNING, "Failed to allocate p2p queue info.");
+ cpfl_p2p_queue_grps_del(vport);
+ return 0;
+ }
+ ret = cpfl_p2p_queue_info_init(cpfl_vport,
+ (struct virtchnl2_add_queue_groups *)p2p_q_vc_out_info);
+ if (ret != 0) {
+ PMD_INIT_LOG(WARNING, "Failed to init p2p queue info.");
+ rte_free(cpfl_vport->p2p_q_chunks_info);
+ cpfl_p2p_queue_grps_del(vport);
+ }
+ }
+
return 0;
err_mac_addrs:
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 81fe9ac4c3..666d46a44a 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -56,6 +56,7 @@
/* Device IDs */
#define IDPF_DEV_ID_CPF 0x1453
+#define VIRTCHNL2_QUEUE_GROUP_P2P 0x100
struct cpfl_vport_param {
struct cpfl_adapter_ext *adapter;
@@ -69,8 +70,25 @@ struct cpfl_devargs {
uint16_t req_vport_nb;
};
+struct p2p_queue_chunks_info {
+ uint32_t tx_start_qid;
+ uint32_t rx_start_qid;
+ uint32_t tx_compl_start_qid;
+ uint32_t rx_buf_start_qid;
+
+ uint64_t tx_qtail_start;
+ uint32_t tx_qtail_spacing;
+ uint64_t rx_qtail_start;
+ uint32_t rx_qtail_spacing;
+ uint64_t tx_compl_qtail_start;
+ uint32_t tx_compl_qtail_spacing;
+ uint64_t rx_buf_qtail_start;
+ uint32_t rx_buf_qtail_spacing;
+};
+
struct cpfl_vport {
struct idpf_vport base;
+ struct p2p_queue_chunks_info *p2p_q_chunks_info;
};
struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index bfb9ad97bd..1fe65778f0 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -13,6 +13,13 @@
#define CPFL_MIN_RING_DESC 32
#define CPFL_MAX_RING_DESC 4096
#define CPFL_DMA_MEM_ALIGN 4096
+
+#define CPFL_MAX_P2P_NB_QUEUES 16
+#define CPFL_P2P_NB_RX_BUFQ 1
+#define CPFL_P2P_NB_TX_COMPLQ 1
+#define CPFL_P2P_NB_QUEUE_GRPS 1
+#define CPFL_P2P_QUEUE_GRP_ID 1
+
/* Base address of the HW descriptor ring should be 128B aligned. */
#define CPFL_RING_BASE_ALIGN 128
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v10 04/14] net/cpfl: support hairpin queue capability get
2023-06-06 10:03 ` [PATCH v10 00/14] net/cpfl: add hairpin queue support beilei.xing
` (3 preceding siblings ...)
2023-06-06 10:03 ` [PATCH v10 03/14] net/cpfl: add hairpin queue group during vport init beilei.xing
@ 2023-06-06 10:03 ` beilei.xing
2023-06-06 10:03 ` [PATCH v10 05/14] net/cpfl: support hairpin queue setup and release beilei.xing
` (9 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-06 10:03 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch adds hairpin_cap_get ops support.
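From the application side the new op is reached through the standard ethdev
API; a minimal sketch (illustrative only, not part of this patch) is below.
A -ENOTSUP return means the vport has no P2P queue group, so hairpin queues
cannot be used on that port:

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Illustrative sketch: query hairpin limits before queue setup. */
    static int
    example_hairpin_cap_check(uint16_t port_id)
    {
            struct rte_eth_hairpin_cap cap;
            int ret;

            ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
            if (ret != 0)
                    return ret;

            printf("hairpin: max queues %u, max desc %u\n",
                   cap.max_nb_queues, cap.max_nb_desc);
            return 0;
    }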
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 18 ++++++++++++++++++
drivers/net/cpfl/cpfl_rxtx.h | 3 +++
2 files changed, 21 insertions(+)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 29d0375ecd..7c6174fec0 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -154,6 +154,23 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
return rte_eth_linkstatus_set(dev, &new_link);
}
+static int
+cpfl_hairpin_cap_get(struct rte_eth_dev *dev,
+ struct rte_eth_hairpin_cap *cap)
+{
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+
+ if (cpfl_vport->p2p_q_chunks_info == NULL)
+ return -ENOTSUP;
+
+ cap->max_nb_queues = CPFL_MAX_P2P_NB_QUEUES;
+ cap->max_rx_2_tx = CPFL_MAX_HAIRPINQ_RX_2_TX;
+ cap->max_tx_2_rx = CPFL_MAX_HAIRPINQ_TX_2_RX;
+ cap->max_nb_desc = CPFL_MAX_HAIRPINQ_NB_DESC;
+
+ return 0;
+}
+
static int
cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
@@ -905,6 +922,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.xstats_get = cpfl_dev_xstats_get,
.xstats_get_names = cpfl_dev_xstats_get_names,
.xstats_reset = cpfl_dev_xstats_reset,
+ .hairpin_cap_get = cpfl_hairpin_cap_get,
};
static int
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 1fe65778f0..a4a164d462 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -14,6 +14,9 @@
#define CPFL_MAX_RING_DESC 4096
#define CPFL_DMA_MEM_ALIGN 4096
+#define CPFL_MAX_HAIRPINQ_RX_2_TX 1
+#define CPFL_MAX_HAIRPINQ_TX_2_RX 1
+#define CPFL_MAX_HAIRPINQ_NB_DESC 1024
#define CPFL_MAX_P2P_NB_QUEUES 16
#define CPFL_P2P_NB_RX_BUFQ 1
#define CPFL_P2P_NB_TX_COMPLQ 1
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v10 05/14] net/cpfl: support hairpin queue setup and release
2023-06-06 10:03 ` [PATCH v10 00/14] net/cpfl: add hairpin queue support beilei.xing
` (4 preceding siblings ...)
2023-06-06 10:03 ` [PATCH v10 04/14] net/cpfl: support hairpin queue capability get beilei.xing
@ 2023-06-06 10:03 ` beilei.xing
2023-06-06 10:03 ` [PATCH v10 06/14] common/idpf: add queue config API beilei.xing
` (8 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-06 10:03 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
Support hairpin Rx/Tx queue setup and release.
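From the application side these ops are reached through the standard ethdev
hairpin setup API; a minimal sketch (illustrative only, not part of this
patch) is below. It assumes each port was configured with one data queue at
index 0 and one hairpin queue at index 1, since the driver places hairpin
queues after the data queues:

    #include <string.h>
    #include <rte_ethdev.h>

    /* Illustrative sketch: pair Rx queue 1 of rx_port with Tx queue 1 of tx_port. */
    static int
    example_hairpin_queue_setup(uint16_t rx_port, uint16_t tx_port, uint16_t nb_desc)
    {
            struct rte_eth_hairpin_conf conf;
            int ret;

            memset(&conf, 0, sizeof(conf));
            conf.peer_count = 1;

            conf.peers[0].port = tx_port;
            conf.peers[0].queue = 1;
            ret = rte_eth_rx_hairpin_queue_setup(rx_port, 1, nb_desc, &conf);
            if (ret != 0)
                    return ret;

            conf.peers[0].port = rx_port;
            conf.peers[0].queue = 1;
            return rte_eth_tx_hairpin_queue_setup(tx_port, 1, nb_desc, &conf);
    }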
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 6 +
drivers/net/cpfl/cpfl_ethdev.h | 11 +
drivers/net/cpfl/cpfl_rxtx.c | 364 +++++++++++++++++++++++-
drivers/net/cpfl/cpfl_rxtx.h | 36 +++
drivers/net/cpfl/cpfl_rxtx_vec_common.h | 4 +
5 files changed, 420 insertions(+), 1 deletion(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 7c6174fec0..3b2bda9280 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -880,6 +880,10 @@ cpfl_dev_close(struct rte_eth_dev *dev)
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
cpfl_dev_stop(dev);
+ if (cpfl_vport->p2p_mp) {
+ rte_mempool_free(cpfl_vport->p2p_mp);
+ cpfl_vport->p2p_mp = NULL;
+ }
if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
cpfl_p2p_queue_grps_del(vport);
@@ -923,6 +927,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.xstats_get_names = cpfl_dev_xstats_get_names,
.xstats_reset = cpfl_dev_xstats_reset,
.hairpin_cap_get = cpfl_hairpin_cap_get,
+ .rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
+ .tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
};
static int
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 666d46a44a..2e42354f70 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -89,6 +89,17 @@ struct p2p_queue_chunks_info {
struct cpfl_vport {
struct idpf_vport base;
struct p2p_queue_chunks_info *p2p_q_chunks_info;
+
+ struct rte_mempool *p2p_mp;
+
+ uint16_t nb_data_rxq;
+ uint16_t nb_data_txq;
+ uint16_t nb_p2p_rxq;
+ uint16_t nb_p2p_txq;
+
+ struct idpf_rx_queue *p2p_rx_bufq;
+ struct idpf_tx_queue *p2p_tx_complq;
+ bool p2p_manual_bind;
};
struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 04a51b8d15..90b408d1f4 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -10,6 +10,67 @@
#include "cpfl_rxtx.h"
#include "cpfl_rxtx_vec_common.h"
+static inline void
+cpfl_tx_hairpin_descq_reset(struct idpf_tx_queue *txq)
+{
+ uint32_t i, size;
+
+ if (!txq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+ return;
+ }
+
+ size = txq->nb_tx_desc * CPFL_P2P_DESC_LEN;
+ for (i = 0; i < size; i++)
+ ((volatile char *)txq->desc_ring)[i] = 0;
+}
+
+static inline void
+cpfl_tx_hairpin_complq_reset(struct idpf_tx_queue *cq)
+{
+ uint32_t i, size;
+
+ if (!cq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to complq is NULL");
+ return;
+ }
+
+ size = cq->nb_tx_desc * CPFL_P2P_DESC_LEN;
+ for (i = 0; i < size; i++)
+ ((volatile char *)cq->compl_ring)[i] = 0;
+}
+
+static inline void
+cpfl_rx_hairpin_descq_reset(struct idpf_rx_queue *rxq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxq)
+ return;
+
+ len = rxq->nb_rx_desc;
+ for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
+ ((volatile char *)rxq->rx_ring)[i] = 0;
+}
+
+static inline void
+cpfl_rx_hairpin_bufq_reset(struct idpf_rx_queue *rxbq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxbq)
+ return;
+
+ len = rxbq->nb_rx_desc;
+ for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
+ ((volatile char *)rxbq->rx_ring)[i] = 0;
+
+ rxbq->bufq1 = NULL;
+ rxbq->bufq2 = NULL;
+}
+
static uint64_t
cpfl_rx_offload_convert(uint64_t offload)
{
@@ -234,7 +295,10 @@ cpfl_rx_queue_release(void *rxq)
/* Split queue */
if (!q->adapter->is_rx_singleq) {
- if (q->bufq2)
+ /* The mz is shared between Tx/Rx hairpin queues, so let the Rx release
+ * path free the bufs, q->bufq1->mz and q->mz.
+ */
+ if (!cpfl_rxq->hairpin_info.hairpin_q && q->bufq2)
cpfl_rx_split_bufq_release(q->bufq2);
if (q->bufq1)
@@ -385,6 +449,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
}
+ cpfl_vport->nb_data_rxq++;
rxq->q_set = true;
dev->data->rx_queues[queue_idx] = cpfl_rxq;
@@ -548,6 +613,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
queue_idx * vport->chunks_info.tx_qtail_spacing);
txq->ops = &def_txq_ops;
+ cpfl_vport->nb_data_txq++;
txq->q_set = true;
dev->data->tx_queues[queue_idx] = cpfl_txq;
@@ -562,6 +628,300 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return ret;
}
+static int
+cpfl_rx_hairpin_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,
+ uint16_t logic_qid, uint16_t nb_desc)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct rte_mempool *mp;
+ char pool_name[RTE_MEMPOOL_NAMESIZE];
+
+ mp = cpfl_vport->p2p_mp;
+ if (!mp) {
+ snprintf(pool_name, RTE_MEMPOOL_NAMESIZE, "p2p_mb_pool_%u",
+ dev->data->port_id);
+ mp = rte_pktmbuf_pool_create(pool_name, CPFL_P2P_NB_MBUF * CPFL_MAX_P2P_NB_QUEUES,
+ CPFL_P2P_CACHE_SIZE, 0, CPFL_P2P_MBUF_SIZE,
+ dev->device->numa_node);
+ if (!mp) {
+ PMD_INIT_LOG(ERR, "Failed to allocate mbuf pool for p2p");
+ return -ENOMEM;
+ }
+ cpfl_vport->p2p_mp = mp;
+ }
+
+ bufq->mp = mp;
+ bufq->nb_rx_desc = nb_desc;
+ bufq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_buf_start_qid,
+ logic_qid);
+ bufq->port_id = dev->data->port_id;
+ bufq->adapter = adapter;
+ bufq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
+
+ bufq->q_set = true;
+ bufq->ops = &def_rxq_ops;
+
+ return 0;
+}
+
+int
+cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf)
+{
+ struct cpfl_vport *cpfl_vport = (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter_base = vport->adapter;
+ uint16_t logic_qid = cpfl_vport->nb_p2p_rxq;
+ struct cpfl_rxq_hairpin_info *hairpin_info;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct idpf_rx_queue *bufq1 = NULL;
+ struct idpf_rx_queue *rxq;
+ uint16_t peer_port, peer_q;
+ uint16_t qid;
+ int ret;
+
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ PMD_INIT_LOG(ERR, "Only split queue model supports hairpin queue.");
+ return -EINVAL;
+ }
+
+ if (conf->peer_count != 1) {
+ PMD_INIT_LOG(ERR, "Can't support Rx hairpin queue peer count %d", conf->peer_count);
+ return -EINVAL;
+ }
+
+ peer_port = conf->peers[0].port;
+ peer_q = conf->peers[0].queue;
+
+ if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
+ nb_desc > CPFL_MAX_RING_DESC ||
+ nb_desc < CPFL_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is invalid", nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory if needed */
+ if (dev->data->rx_queues[queue_idx]) {
+ cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* Setup Rx description queue */
+ cpfl_rxq = rte_zmalloc_socket("cpfl hairpin rxq",
+ sizeof(struct cpfl_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!cpfl_rxq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
+ return -ENOMEM;
+ }
+
+ rxq = &cpfl_rxq->base;
+ hairpin_info = &cpfl_rxq->hairpin_info;
+ rxq->nb_rx_desc = nb_desc * 2;
+ rxq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid, logic_qid);
+ rxq->port_id = dev->data->port_id;
+ rxq->adapter = adapter_base;
+ rxq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
+ hairpin_info->hairpin_q = true;
+ hairpin_info->peer_txp = peer_port;
+ hairpin_info->peer_txq_id = peer_q;
+
+ if (conf->manual_bind != 0)
+ cpfl_vport->p2p_manual_bind = true;
+ else
+ cpfl_vport->p2p_manual_bind = false;
+
+ if (cpfl_vport->p2p_rx_bufq == NULL) {
+ bufq1 = rte_zmalloc_socket("hairpin rx bufq1",
+ sizeof(struct idpf_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!bufq1) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for hairpin Rx buffer queue 1.");
+ ret = -ENOMEM;
+ goto err_alloc_bufq1;
+ }
+ qid = 2 * logic_qid;
+ ret = cpfl_rx_hairpin_bufq_setup(dev, bufq1, qid, nb_desc);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to setup hairpin Rx buffer queue 1");
+ ret = -EINVAL;
+ goto err_setup_bufq1;
+ }
+ cpfl_vport->p2p_rx_bufq = bufq1;
+ }
+
+ rxq->bufq1 = cpfl_vport->p2p_rx_bufq;
+ rxq->bufq2 = NULL;
+
+ cpfl_vport->nb_p2p_rxq++;
+ rxq->q_set = true;
+ dev->data->rx_queues[queue_idx] = cpfl_rxq;
+
+ return 0;
+
+err_setup_bufq1:
+ rte_mempool_free(cpfl_vport->p2p_mp);
+ rte_free(bufq1);
+err_alloc_bufq1:
+ rte_free(cpfl_rxq);
+
+ return ret;
+}
+
+int
+cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+
+ struct idpf_vport *vport = &cpfl_vport->base;
+ struct idpf_adapter *adapter_base = vport->adapter;
+ uint16_t logic_qid = cpfl_vport->nb_p2p_txq;
+ struct cpfl_txq_hairpin_info *hairpin_info;
+ struct idpf_hw *hw = &adapter_base->hw;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct idpf_tx_queue *txq, *cq;
+ const struct rte_memzone *mz;
+ uint32_t ring_size;
+ uint16_t peer_port, peer_q;
+ int ret;
+
+ if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ PMD_INIT_LOG(ERR, "Only split queue model supports hairpin queue.");
+ return -EINVAL;
+ }
+
+ if (conf->peer_count != 1) {
+ PMD_INIT_LOG(ERR, "Can't support Tx hairpin queue peer count %d", conf->peer_count);
+ return -EINVAL;
+ }
+
+ peer_port = conf->peers[0].port;
+ peer_q = conf->peers[0].queue;
+
+ if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
+ nb_desc > CPFL_MAX_RING_DESC ||
+ nb_desc < CPFL_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is invalid",
+ nb_desc);
+ return -EINVAL;
+ }
+
+ /* Free memory if needed. */
+ if (dev->data->tx_queues[queue_idx]) {
+ cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the TX queue data structure. */
+ cpfl_txq = rte_zmalloc_socket("cpfl hairpin txq",
+ sizeof(struct cpfl_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
+ if (!cpfl_txq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+ return -ENOMEM;
+ }
+
+ txq = &cpfl_txq->base;
+ hairpin_info = &cpfl_txq->hairpin_info;
+ /* Txq ring length should be 2 times the Tx completion queue size. */
+ txq->nb_tx_desc = nb_desc * 2;
+ txq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_start_qid, logic_qid);
+ txq->port_id = dev->data->port_id;
+ hairpin_info->hairpin_q = true;
+ hairpin_info->peer_rxp = peer_port;
+ hairpin_info->peer_rxq_id = peer_q;
+
+ if (conf->manual_bind != 0)
+ cpfl_vport->p2p_manual_bind = true;
+ else
+ cpfl_vport->p2p_manual_bind = false;
+
+ /* Always Tx hairpin queue allocates Tx HW ring */
+ ring_size = RTE_ALIGN(txq->nb_tx_desc * CPFL_P2P_DESC_LEN,
+ CPFL_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_ring", logic_qid,
+ ring_size + CPFL_P2P_RING_BUF,
+ CPFL_RING_BASE_ALIGN,
+ dev->device->numa_node);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
+ ret = -ENOMEM;
+ goto err_txq_mz_rsv;
+ }
+
+ txq->tx_ring_phys_addr = mz->iova;
+ txq->desc_ring = mz->addr;
+ txq->mz = mz;
+
+ cpfl_tx_hairpin_descq_reset(txq);
+ txq->qtx_tail = hw->hw_addr +
+ cpfl_hw_qtail_get(cpfl_vport->p2p_q_chunks_info->tx_qtail_start,
+ logic_qid, cpfl_vport->p2p_q_chunks_info->tx_qtail_spacing);
+ txq->ops = &def_txq_ops;
+
+ if (cpfl_vport->p2p_tx_complq == NULL) {
+ cq = rte_zmalloc_socket("cpfl hairpin cq",
+ sizeof(struct idpf_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ dev->device->numa_node);
+ if (!cq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+ ret = -ENOMEM;
+ goto err_cq_alloc;
+ }
+
+ cq->nb_tx_desc = nb_desc;
+ cq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_compl_start_qid,
+ 0);
+ cq->port_id = dev->data->port_id;
+
+ /* Tx completion queue always allocates the HW ring */
+ ring_size = RTE_ALIGN(cq->nb_tx_desc * CPFL_P2P_DESC_LEN,
+ CPFL_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_compl_ring", logic_qid,
+ ring_size + CPFL_P2P_RING_BUF,
+ CPFL_RING_BASE_ALIGN,
+ dev->device->numa_node);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX completion queue");
+ ret = -ENOMEM;
+ goto err_cq_mz_rsv;
+ }
+ cq->tx_ring_phys_addr = mz->iova;
+ cq->compl_ring = mz->addr;
+ cq->mz = mz;
+
+ cpfl_tx_hairpin_complq_reset(cq);
+ cpfl_vport->p2p_tx_complq = cq;
+ }
+
+ txq->complq = cpfl_vport->p2p_tx_complq;
+
+ cpfl_vport->nb_p2p_txq++;
+ txq->q_set = true;
+ dev->data->tx_queues[queue_idx] = cpfl_txq;
+
+ return 0;
+
+err_cq_mz_rsv:
+ rte_free(cq);
+err_cq_alloc:
+ cpfl_dma_zone_release(mz);
+err_txq_mz_rsv:
+ rte_free(cpfl_txq);
+ return ret;
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
@@ -865,6 +1225,8 @@ cpfl_set_rx_function(struct rte_eth_dev *dev)
if (vport->rx_vec_allowed) {
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ continue;
(void)idpf_qc_splitq_rx_vec_setup(&cpfl_rxq->base);
}
#ifdef CC_AVX512_SUPPORT
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index a4a164d462..06198d4aad 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -22,6 +22,11 @@
#define CPFL_P2P_NB_TX_COMPLQ 1
#define CPFL_P2P_NB_QUEUE_GRPS 1
#define CPFL_P2P_QUEUE_GRP_ID 1
+#define CPFL_P2P_DESC_LEN 16
+#define CPFL_P2P_NB_MBUF 4096
+#define CPFL_P2P_CACHE_SIZE 250
+#define CPFL_P2P_MBUF_SIZE 2048
+#define CPFL_P2P_RING_BUF 128
/* Base address of the HW descriptor ring should be 128B aligned. */
#define CPFL_RING_BASE_ALIGN 128
@@ -33,14 +38,40 @@
#define CPFL_SUPPORT_CHAIN_NUM 5
+struct cpfl_rxq_hairpin_info {
+ bool hairpin_q; /* if rx queue is a hairpin queue */
+ uint16_t peer_txp;
+ uint16_t peer_txq_id;
+};
+
struct cpfl_rx_queue {
struct idpf_rx_queue base;
+ struct cpfl_rxq_hairpin_info hairpin_info;
+};
+
+struct cpfl_txq_hairpin_info {
+ bool hairpin_q; /* if tx queue is a hairpin queue */
+ uint16_t peer_rxp;
+ uint16_t peer_rxq_id;
};
struct cpfl_tx_queue {
struct idpf_tx_queue base;
+ struct cpfl_txq_hairpin_info hairpin_info;
};
+static inline uint16_t
+cpfl_hw_qid_get(uint16_t start_qid, uint16_t offset)
+{
+ return start_qid + offset;
+}
+
+static inline uint64_t
+cpfl_hw_qtail_get(uint64_t tail_start, uint16_t offset, uint64_t tail_spacing)
+{
+ return tail_start + offset * tail_spacing;
+}
+
int cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
@@ -59,4 +90,9 @@ void cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
void cpfl_set_rx_function(struct rte_eth_dev *dev);
void cpfl_set_tx_function(struct rte_eth_dev *dev);
+int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, const struct rte_eth_hairpin_conf *conf);
+int cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc,
+ const struct rte_eth_hairpin_conf *conf);
#endif /* _CPFL_RXTX_H_ */
diff --git a/drivers/net/cpfl/cpfl_rxtx_vec_common.h b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
index 5690b17911..d8e9191196 100644
--- a/drivers/net/cpfl/cpfl_rxtx_vec_common.h
+++ b/drivers/net/cpfl/cpfl_rxtx_vec_common.h
@@ -85,6 +85,8 @@ cpfl_rx_vec_dev_check_default(struct rte_eth_dev *dev)
cpfl_rxq = dev->data->rx_queues[i];
default_ret = cpfl_rx_vec_queue_default(&cpfl_rxq->base);
if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ continue;
splitq_ret = cpfl_rx_splitq_vec_default(&cpfl_rxq->base);
ret = splitq_ret && default_ret;
} else {
@@ -106,6 +108,8 @@ cpfl_tx_vec_dev_check_default(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_tx_queues; i++) {
cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq->hairpin_info.hairpin_q)
+ continue;
ret = cpfl_tx_vec_queue_default(&cpfl_txq->base);
if (ret == CPFL_SCALAR_PATH)
return CPFL_SCALAR_PATH;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v10 06/14] common/idpf: add queue config API
2023-06-06 10:03 ` [PATCH v10 00/14] net/cpfl: add hairpin queue support beilei.xing
` (5 preceding siblings ...)
2023-06-06 10:03 ` [PATCH v10 05/14] net/cpfl: support hairpin queue setup and release beilei.xing
@ 2023-06-06 10:03 ` beilei.xing
2023-06-06 10:03 ` [PATCH v10 07/14] net/cpfl: support hairpin queue configuration beilei.xing
` (7 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-06 10:03 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds Rx/Tx queue configuration APIs that configure queues from
caller-provided queue info: idpf_vc_rxq_config_by_info and
idpf_vc_txq_config_by_info.
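A minimal caller-side sketch (illustrative only, not part of this patch),
assuming the caller has already filled an idpf_rx_queue; the example_ helper
name is a placeholder:

    /* Illustrative sketch: configure one Rx buffer queue from caller-built info. */
    static int
    example_rx_bufq_config(struct idpf_vport *vport, struct idpf_rx_queue *bufq)
    {
            struct virtchnl2_rxq_info rxq_info;

            memset(&rxq_info, 0, sizeof(rxq_info));
            rxq_info.type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
            rxq_info.queue_id = bufq->queue_id;
            rxq_info.ring_len = bufq->nb_rx_desc;
            rxq_info.dma_ring_addr = bufq->rx_ring_phys_addr;
            rxq_info.data_buffer_size = bufq->rx_buf_len;
            rxq_info.model = VIRTCHNL2_QUEUE_MODEL_SPLIT;

            /* num_qs = 1: only this queue's info is sent in the message. */
            return idpf_vc_rxq_config_by_info(vport, &rxq_info, 1);
    }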
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 70 ++++++++++++++++++++++
drivers/common/idpf/idpf_common_virtchnl.h | 6 ++
drivers/common/idpf/version.map | 2 +
3 files changed, 78 insertions(+)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index a3fe55c897..211b44a88e 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -1050,6 +1050,41 @@ idpf_vc_rxq_config(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
return err;
}
+int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct virtchnl2_rxq_info *rxq_info,
+ uint16_t num_qs)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+ struct idpf_cmd_info args;
+ int size, err, i;
+
+ size = sizeof(*vc_rxqs) + (num_qs - 1) *
+ sizeof(struct virtchnl2_rxq_info);
+ vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+ if (vc_rxqs == NULL) {
+ DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_rxqs->vport_id = vport->vport_id;
+ vc_rxqs->num_qinfo = num_qs;
+ memcpy(vc_rxqs->qinfo, rxq_info, num_qs * sizeof(struct virtchnl2_rxq_info));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+ args.in_args = (uint8_t *)vc_rxqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ rte_free(vc_rxqs);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+ return err;
+}
+
int
idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
{
@@ -1121,6 +1156,41 @@ idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
return err;
}
+int
+idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
+ uint16_t num_qs)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+ struct idpf_cmd_info args;
+ int size, err;
+
+ size = sizeof(*vc_txqs) + (num_qs - 1) * sizeof(struct virtchnl2_txq_info);
+ vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+ if (vc_txqs == NULL) {
+ DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_txqs->vport_id = vport->vport_id;
+ vc_txqs->num_qinfo = num_qs;
+ memcpy(vc_txqs->qinfo, txq_info, num_qs * sizeof(struct virtchnl2_txq_info));
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+ args.in_args = (uint8_t *)vc_txqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_vc_cmd_execute(adapter, &args);
+ rte_free(vc_txqs);
+ if (err != 0)
+ DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+ return err;
+}
+
int
idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
struct idpf_ctlq_msg *q_msg)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index 58b16e1c5d..db83761a5e 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -65,6 +65,12 @@ __rte_internal
int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 *buff_count, struct idpf_dma_mem **buffs);
__rte_internal
+int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct virtchnl2_rxq_info *rxq_info,
+ uint16_t num_qs);
+__rte_internal
+int idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
+ uint16_t num_qs);
+__rte_internal
int idpf_vc_queue_grps_del(struct idpf_vport *vport,
uint16_t num_q_grps,
struct virtchnl2_queue_group_id *qg_ids);
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 01d18f3f3f..17e77884ce 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -54,8 +54,10 @@ INTERNAL {
idpf_vc_rss_lut_get;
idpf_vc_rss_lut_set;
idpf_vc_rxq_config;
+ idpf_vc_rxq_config_by_info;
idpf_vc_stats_query;
idpf_vc_txq_config;
+ idpf_vc_txq_config_by_info;
idpf_vc_vectors_alloc;
idpf_vc_vectors_dealloc;
idpf_vc_vport_create;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v10 07/14] net/cpfl: support hairpin queue configuration
2023-06-06 10:03 ` [PATCH v10 00/14] net/cpfl: add hairpin queue support beilei.xing
` (6 preceding siblings ...)
2023-06-06 10:03 ` [PATCH v10 06/14] common/idpf: add queue config API beilei.xing
@ 2023-06-06 10:03 ` beilei.xing
2023-06-06 10:03 ` [PATCH v10 08/14] common/idpf: add switch queue API beilei.xing
` (6 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-06 10:03 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports Rx/Tx hairpin queue configuration.
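The four helpers added below are used by cpfl_start_queues() for the
non-manual bind case; a condensed sketch (illustrative only, not part of
this patch) of the configuration order for one Tx/Rx hairpin queue pair:

    /* Illustrative sketch: Txq first, then Tx completion queue, then Rx
     * buffer queue, then Rxq.
     */
    static int
    example_hairpin_config(struct cpfl_vport *cpfl_vport,
                           struct cpfl_tx_queue *cpfl_txq,
                           struct cpfl_rx_queue *cpfl_rxq)
    {
            struct idpf_vport *vport = &cpfl_vport->base;
            int err;

            err = cpfl_hairpin_txq_config(vport, cpfl_txq);
            if (err == 0)
                    err = cpfl_hairpin_tx_complq_config(cpfl_vport);
            if (err == 0)
                    err = cpfl_hairpin_rx_bufq_config(cpfl_vport);
            if (err == 0)
                    err = cpfl_hairpin_rxq_config(vport, cpfl_rxq);
            return err;
    }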
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 136 +++++++++++++++++++++++++++++++--
drivers/net/cpfl/cpfl_rxtx.c | 88 +++++++++++++++++++++
drivers/net/cpfl/cpfl_rxtx.h | 7 ++
3 files changed, 225 insertions(+), 6 deletions(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 3b2bda9280..749589828a 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -742,33 +742,157 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
return idpf_vport_irq_map_config(vport, nb_rx_queues);
}
+/* Update hairpin_info for dev's tx hairpin queue */
+static int
+cpfl_txq_hairpin_info_update(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+ struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+ struct cpfl_txq_hairpin_info *hairpin_info;
+ struct cpfl_tx_queue *cpfl_txq;
+ int i;
+
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ hairpin_info = &cpfl_txq->hairpin_info;
+ if (hairpin_info->peer_rxp != rx_port) {
+ PMD_DRV_LOG(ERR, "port %d is not the peer port", rx_port);
+ return -EINVAL;
+ }
+ hairpin_info->peer_rxq_id =
+ cpfl_hw_qid_get(cpfl_rx_vport->p2p_q_chunks_info->rx_start_qid,
+ hairpin_info->peer_rxq_id - cpfl_rx_vport->nb_data_rxq);
+ }
+
+ return 0;
+}
+
+/* Bind Rx hairpin queue's memory zone to peer Tx hairpin queue's memory zone */
+static void
+cpfl_rxq_hairpin_mz_bind(struct rte_eth_dev *dev)
+{
+ struct cpfl_vport *cpfl_rx_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_rx_vport->base;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_hw *hw = &adapter->hw;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct rte_eth_dev *peer_dev;
+ const struct rte_memzone *mz;
+ uint16_t peer_tx_port;
+ uint16_t peer_tx_qid;
+ int i;
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ peer_tx_port = cpfl_rxq->hairpin_info.peer_txp;
+ peer_tx_qid = cpfl_rxq->hairpin_info.peer_txq_id;
+ peer_dev = &rte_eth_devices[peer_tx_port];
+ cpfl_txq = peer_dev->data->tx_queues[peer_tx_qid];
+
+ /* bind rx queue */
+ mz = cpfl_txq->base.mz;
+ cpfl_rxq->base.rx_ring_phys_addr = mz->iova;
+ cpfl_rxq->base.rx_ring = mz->addr;
+ cpfl_rxq->base.mz = mz;
+
+ /* bind rx buffer queue */
+ mz = cpfl_txq->base.complq->mz;
+ cpfl_rxq->base.bufq1->rx_ring_phys_addr = mz->iova;
+ cpfl_rxq->base.bufq1->rx_ring = mz->addr;
+ cpfl_rxq->base.bufq1->mz = mz;
+ cpfl_rxq->base.bufq1->qrx_tail = hw->hw_addr +
+ cpfl_hw_qtail_get(cpfl_rx_vport->p2p_q_chunks_info->rx_buf_qtail_start,
+ 0, cpfl_rx_vport->p2p_q_chunks_info->rx_buf_qtail_spacing);
+ }
+}
+
static int
cpfl_start_queues(struct rte_eth_dev *dev)
{
+ struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+ struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_rx_queue *cpfl_rxq;
struct cpfl_tx_queue *cpfl_txq;
+ int update_flag = 0;
int err = 0;
int i;
+ /* For normal data queues, configure, init and enable Txq.
+ * For non-manual bind hairpin queues, configure Txq.
+ */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
cpfl_txq = dev->data->tx_queues[i];
if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
continue;
- err = cpfl_tx_queue_start(dev, i);
+ if (!cpfl_txq->hairpin_info.hairpin_q) {
+ err = cpfl_tx_queue_start(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+ return err;
+ }
+ } else if (!cpfl_vport->p2p_manual_bind) {
+ if (update_flag == 0) {
+ err = cpfl_txq_hairpin_info_update(dev,
+ cpfl_txq->hairpin_info.peer_rxp);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info");
+ return err;
+ }
+ update_flag = 1;
+ }
+ err = cpfl_hairpin_txq_config(vport, cpfl_txq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+ return err;
+ }
+ }
+ }
+
+ /* For non-manual bind hairpin queues, configure Tx completion queue first.*/
+ if (!cpfl_vport->p2p_manual_bind && cpfl_vport->p2p_tx_complq != NULL) {
+ err = cpfl_hairpin_tx_complq_config(cpfl_vport);
if (err != 0) {
- PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+ PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
return err;
}
}
+ /* For non-manual bind hairpin queues, configure Rx buffer queue.*/
+ if (!cpfl_vport->p2p_manual_bind && cpfl_vport->p2p_rx_bufq != NULL) {
+ cpfl_rxq_hairpin_mz_bind(dev);
+ err = cpfl_hairpin_rx_bufq_config(cpfl_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
+ return err;
+ }
+ }
+
+ /* For normal data queues, configure, init and enable Rxq.
+ * For non-manual bind hairpin queues, configure Rxq, and then init Rxq.
+ */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
continue;
- err = cpfl_rx_queue_start(dev, i);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
- return err;
+ if (!cpfl_rxq->hairpin_info.hairpin_q) {
+ err = cpfl_rx_queue_start(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
+ return err;
+ }
+ } else if (!cpfl_vport->p2p_manual_bind) {
+ err = cpfl_hairpin_rxq_config(vport, cpfl_rxq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
+ return err;
+ }
+ err = cpfl_rx_queue_init(dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+ return err;
+ }
}
}
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 90b408d1f4..fd24d544a1 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -922,6 +922,94 @@ cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return ret;
}
+int
+cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport)
+{
+ struct idpf_rx_queue *rx_bufq = cpfl_vport->p2p_rx_bufq;
+ struct virtchnl2_rxq_info rxq_info;
+
+ memset(&rxq_info, 0, sizeof(rxq_info));
+
+ rxq_info.type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ rxq_info.queue_id = rx_bufq->queue_id;
+ rxq_info.ring_len = rx_bufq->nb_rx_desc;
+ rxq_info.dma_ring_addr = rx_bufq->rx_ring_phys_addr;
+ rxq_info.desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info.rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
+ rxq_info.model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ rxq_info.data_buffer_size = rx_bufq->rx_buf_len;
+ rxq_info.buffer_notif_stride = CPFL_RX_BUF_STRIDE;
+
+ return idpf_vc_rxq_config_by_info(&cpfl_vport->base, &rxq_info, 1);
+}
+
+int
+cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq)
+{
+ struct virtchnl2_rxq_info rxq_info;
+ struct idpf_rx_queue *rxq = &cpfl_rxq->base;
+
+ memset(&rxq_info, 0, sizeof(rxq_info));
+
+ rxq_info.type = VIRTCHNL2_QUEUE_TYPE_RX;
+ rxq_info.queue_id = rxq->queue_id;
+ rxq_info.ring_len = rxq->nb_rx_desc;
+ rxq_info.dma_ring_addr = rxq->rx_ring_phys_addr;
+ rxq_info.rx_bufq1_id = rxq->bufq1->queue_id;
+ rxq_info.max_pkt_size = vport->max_pkt_len;
+ rxq_info.desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info.qflags |= VIRTCHNL2_RX_DESC_SIZE_16BYTE;
+
+ rxq_info.data_buffer_size = rxq->rx_buf_len;
+ rxq_info.model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ rxq_info.rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
+
+ PMD_DRV_LOG(NOTICE, "hairpin: vport %u, Rxq id 0x%x",
+ vport->vport_id, rxq_info.queue_id);
+
+ return idpf_vc_rxq_config_by_info(vport, &rxq_info, 1);
+}
+
+int
+cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport)
+{
+ struct idpf_tx_queue *tx_complq = cpfl_vport->p2p_tx_complq;
+ struct virtchnl2_txq_info txq_info;
+
+ memset(&txq_info, 0, sizeof(txq_info));
+
+ txq_info.dma_ring_addr = tx_complq->tx_ring_phys_addr;
+ txq_info.type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ txq_info.queue_id = tx_complq->queue_id;
+ txq_info.ring_len = tx_complq->nb_tx_desc;
+ txq_info.peer_rx_queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+ txq_info.model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info.sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+
+ return idpf_vc_txq_config_by_info(&cpfl_vport->base, &txq_info, 1);
+}
+
+int
+cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq)
+{
+ struct idpf_tx_queue *txq = &cpfl_txq->base;
+ struct virtchnl2_txq_info txq_info;
+
+ memset(&txq_info, 0, sizeof(txq_info));
+
+ txq_info.dma_ring_addr = txq->tx_ring_phys_addr;
+ txq_info.type = VIRTCHNL2_QUEUE_TYPE_TX;
+ txq_info.queue_id = txq->queue_id;
+ txq_info.ring_len = txq->nb_tx_desc;
+ txq_info.tx_compl_queue_id = txq->complq->queue_id;
+ txq_info.relative_queue_id = txq->queue_id;
+ txq_info.peer_rx_queue_id = cpfl_txq->hairpin_info.peer_rxq_id;
+ txq_info.model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info.sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+
+ return idpf_vc_txq_config_by_info(vport, &txq_info, 1);
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 06198d4aad..872ebc1bfd 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -32,12 +32,15 @@
#define CPFL_RING_BASE_ALIGN 128
#define CPFL_DEFAULT_RX_FREE_THRESH 32
+#define CPFL_RXBUF_LOW_WATERMARK 64
#define CPFL_DEFAULT_TX_RS_THRESH 32
#define CPFL_DEFAULT_TX_FREE_THRESH 32
#define CPFL_SUPPORT_CHAIN_NUM 5
+#define CPFL_RX_BUF_STRIDE 64
+
struct cpfl_rxq_hairpin_info {
bool hairpin_q; /* if rx queue is a hairpin queue */
uint16_t peer_txp;
@@ -95,4 +98,8 @@ int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
int cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc,
const struct rte_eth_hairpin_conf *conf);
+int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport);
+int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq);
+int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
+int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
#endif /* _CPFL_RXTX_H_ */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v10 08/14] common/idpf: add switch queue API
2023-06-06 10:03 ` [PATCH v10 00/14] net/cpfl: add hairpin queue support beilei.xing
` (7 preceding siblings ...)
2023-06-06 10:03 ` [PATCH v10 07/14] net/cpfl: support hairpin queue configuration beilei.xing
@ 2023-06-06 10:03 ` beilei.xing
2023-06-06 10:03 ` [PATCH v10 09/14] net/cpfl: support hairpin queue start/stop beilei.xing
` (5 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-06 10:03 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch exposes the idpf_vc_ena_dis_one_queue API so a driver can enable
or disable a single queue.
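A minimal caller-side sketch (illustrative only, not part of this patch);
the example_ helper name is a placeholder and queue_id is an absolute
hardware queue id:

    /* Illustrative sketch: switch one Rx or Tx queue on or off. */
    static int
    example_switch_one_queue(struct idpf_vport *vport, uint16_t queue_id,
                             bool rx, bool on)
    {
            uint32_t type = rx ? VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;

            return idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
    }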
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 2 +-
drivers/common/idpf/idpf_common_virtchnl.h | 3 +++
drivers/common/idpf/version.map | 1 +
3 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index 211b44a88e..6455f640da 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -733,7 +733,7 @@ idpf_vc_vectors_dealloc(struct idpf_vport *vport)
return err;
}
-static int
+int
idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
uint32_t type, bool on)
{
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index db83761a5e..9ff5c38c26 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -71,6 +71,9 @@ __rte_internal
int idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info *txq_info,
uint16_t num_qs);
__rte_internal
+int idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
+ uint32_t type, bool on);
+__rte_internal
int idpf_vc_queue_grps_del(struct idpf_vport *vport,
uint16_t num_q_grps,
struct virtchnl2_queue_group_id *qg_ids);
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 17e77884ce..25624732b0 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -40,6 +40,7 @@ INTERNAL {
idpf_vc_cmd_execute;
idpf_vc_ctlq_post_rx_buffs;
idpf_vc_ctlq_recv;
+ idpf_vc_ena_dis_one_queue;
idpf_vc_irq_map_unmap_config;
idpf_vc_one_msg_read;
idpf_vc_ptype_info_query;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v10 09/14] net/cpfl: support hairpin queue start/stop
2023-06-06 10:03 ` [PATCH v10 00/14] net/cpfl: add hairpin queue support beilei.xing
` (8 preceding siblings ...)
2023-06-06 10:03 ` [PATCH v10 08/14] common/idpf: add switch queue API beilei.xing
@ 2023-06-06 10:03 ` beilei.xing
2023-06-06 10:03 ` [PATCH v10 10/14] common/idpf: add irq map config API beilei.xing
` (4 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-06 10:03 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports Rx/Tx hairpin queue start/stop.
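From the application side nothing new is needed for the non-manual bind
case; a minimal sketch (illustrative only, not part of this patch):

    #include <rte_ethdev.h>

    /* Illustrative sketch: once data and hairpin queues are set up with
     * manual_bind = 0, rte_eth_dev_start() also configures and enables the
     * hairpin queues.
     */
    static int
    example_start_with_hairpin(uint16_t port_id)
    {
            return rte_eth_dev_start(port_id);
    }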
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 46 +++++++++
drivers/net/cpfl/cpfl_rxtx.c | 164 +++++++++++++++++++++++++++++----
drivers/net/cpfl/cpfl_rxtx.h | 15 +++
3 files changed, 207 insertions(+), 18 deletions(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 749589828a..4fb9aef68b 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -896,6 +896,52 @@ cpfl_start_queues(struct rte_eth_dev *dev)
}
}
+ /* For non-manual bind hairpin queues, enable Tx queue and Rx queue,
+ * then enable Tx completion queue and Rx buffer queue.
+ */
+ for (i = cpfl_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ if (cpfl_txq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) {
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ i - cpfl_vport->nb_data_txq,
+ false, true);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on",
+ i);
+ else
+ cpfl_txq->base.q_started = true;
+ }
+ }
+
+ for (i = cpfl_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q && !cpfl_vport->p2p_manual_bind) {
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ i - cpfl_vport->nb_data_rxq,
+ true, true);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on",
+ i);
+ else
+ cpfl_rxq->base.q_started = true;
+ }
+ }
+
+ if (!cpfl_vport->p2p_manual_bind &&
+ cpfl_vport->p2p_tx_complq != NULL &&
+ cpfl_vport->p2p_rx_bufq != NULL) {
+ err = cpfl_switch_hairpin_complq(cpfl_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq");
+ return err;
+ }
+ err = cpfl_switch_hairpin_bufq(cpfl_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Rx bufq");
+ return err;
+ }
+ }
+
return err;
}
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index fd24d544a1..9d278dca54 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -1010,6 +1010,89 @@ cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq
return idpf_vc_txq_config_by_info(vport, &txq_info, 1);
}
+int
+cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ queue_id = cpfl_vport->p2p_tx_complq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+ return err;
+}
+
+int
+cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+ return err;
+}
+
+int
+cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t logic_qid,
+ bool rx, bool on)
+{
+ struct idpf_vport *vport = &cpfl_vport->base;
+ uint32_t type;
+ int err, queue_id;
+
+ type = rx ? VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;
+
+ if (type == VIRTCHNL2_QUEUE_TYPE_RX)
+ queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid, logic_qid);
+ else
+ queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_start_qid, logic_qid);
+ err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+ if (err)
+ return err;
+
+ return err;
+}
+
+static int
+cpfl_alloc_split_p2p_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+ volatile struct virtchnl2_p2p_rx_buf_desc *rxd;
+ struct rte_mbuf *mbuf = NULL;
+ uint64_t dma_addr;
+ uint16_t i;
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ mbuf = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!mbuf)) {
+ PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+ return -ENOMEM;
+ }
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+ rxd = &((volatile struct virtchnl2_p2p_rx_buf_desc *)(rxq->rx_ring))[i];
+ rxd->reserve0 = 0;
+ rxd->pkt_addr = dma_addr;
+ }
+
+ rxq->nb_rx_hold = 0;
+ /* The value written in the RX buffer queue tail register, must be a multiple of 8.*/
+ rxq->rx_tail = rxq->nb_rx_desc - CPFL_HAIRPIN_Q_TAIL_AUX_VALUE;
+
+ return 0;
+}
+
int
cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
@@ -1063,22 +1146,31 @@ cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
IDPF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
} else {
/* Split queue */
- err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
- return err;
- }
- err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
- if (err != 0) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
- return err;
+ if (cpfl_rxq->hairpin_info.hairpin_q) {
+ err = cpfl_alloc_split_p2p_rxq_mbufs(rxq->bufq1);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate p2p RX buffer queue mbuf");
+ return err;
+ }
+ } else {
+ err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq1);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+ return err;
+ }
+ err = idpf_qc_split_rxq_mbufs_alloc(rxq->bufq2);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+ return err;
+ }
}
rte_wmb();
/* Init the RX tail register. */
IDPF_PCI_REG_WRITE(rxq->bufq1->qrx_tail, rxq->bufq1->rx_tail);
- IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
+ if (rxq->bufq2)
+ IDPF_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->bufq2->rx_tail);
}
return err;
@@ -1185,7 +1277,12 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
return -EINVAL;
cpfl_rxq = dev->data->rx_queues[rx_queue_id];
- err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ rx_queue_id - cpfl_vport->nb_data_txq,
+ true, false);
+ else
+ err = idpf_vc_queue_switch(vport, rx_queue_id, true, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
rx_queue_id);
@@ -1199,10 +1296,17 @@ cpfl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
idpf_qc_single_rx_queue_reset(rxq);
} else {
rxq->bufq1->ops->release_mbufs(rxq->bufq1);
- rxq->bufq2->ops->release_mbufs(rxq->bufq2);
- idpf_qc_split_rx_queue_reset(rxq);
+ if (rxq->bufq2)
+ rxq->bufq2->ops->release_mbufs(rxq->bufq2);
+ if (cpfl_rxq->hairpin_info.hairpin_q) {
+ cpfl_rx_hairpin_descq_reset(rxq);
+ cpfl_rx_hairpin_bufq_reset(rxq->bufq1);
+ } else {
+ idpf_qc_split_rx_queue_reset(rxq);
+ }
}
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ if (!cpfl_rxq->hairpin_info.hairpin_q)
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1221,7 +1325,12 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
cpfl_txq = dev->data->tx_queues[tx_queue_id];
- err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
+ if (cpfl_txq->hairpin_info.hairpin_q)
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+ tx_queue_id - cpfl_vport->nb_data_txq,
+ false, false);
+ else
+ err = idpf_vc_queue_switch(vport, tx_queue_id, false, false);
if (err != 0) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
tx_queue_id);
@@ -1234,10 +1343,17 @@ cpfl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
idpf_qc_single_tx_queue_reset(txq);
} else {
- idpf_qc_split_tx_descq_reset(txq);
- idpf_qc_split_tx_complq_reset(txq->complq);
+ if (cpfl_txq->hairpin_info.hairpin_q) {
+ cpfl_tx_hairpin_descq_reset(txq);
+ cpfl_tx_hairpin_complq_reset(txq->complq);
+ } else {
+ idpf_qc_split_tx_descq_reset(txq);
+ idpf_qc_split_tx_complq_reset(txq->complq);
+ }
}
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ if (!cpfl_txq->hairpin_info.hairpin_q)
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1257,10 +1373,22 @@ cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
void
cpfl_stop_queues(struct rte_eth_dev *dev)
{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
struct cpfl_rx_queue *cpfl_rxq;
struct cpfl_tx_queue *cpfl_txq;
int i;
+ if (cpfl_vport->p2p_tx_complq != NULL) {
+ if (cpfl_switch_hairpin_complq(cpfl_vport, false) != 0)
+ PMD_DRV_LOG(ERR, "Failed to stop hairpin Tx complq");
+ }
+
+ if (cpfl_vport->p2p_rx_bufq != NULL) {
+ if (cpfl_switch_hairpin_bufq(cpfl_vport, false) != 0)
+ PMD_DRV_LOG(ERR, "Failed to stop hairpin Rx bufq");
+ }
+
for (i = 0; i < dev->data->nb_rx_queues; i++) {
cpfl_rxq = dev->data->rx_queues[i];
if (cpfl_rxq == NULL)
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 872ebc1bfd..aacd087b56 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -41,6 +41,17 @@
#define CPFL_RX_BUF_STRIDE 64
+/* The value written in the RX buffer queue tail register,
+ * and in WritePTR field in the TX completion queue context,
+ * must be a multiple of 8.
+ */
+#define CPFL_HAIRPIN_Q_TAIL_AUX_VALUE 8
+
+struct virtchnl2_p2p_rx_buf_desc {
+ __le64 reserve0;
+ __le64 pkt_addr; /* Packet buffer address */
+};
+
struct cpfl_rxq_hairpin_info {
bool hairpin_q; /* if rx queue is a hairpin queue */
uint16_t peer_txp;
@@ -102,4 +113,8 @@ int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq);
int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
+int cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on);
+int cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t qid,
+ bool rx, bool on);
#endif /* _CPFL_RXTX_H_ */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v10 10/14] common/idpf: add irq map config API
2023-06-06 10:03 ` [PATCH v10 00/14] net/cpfl: add hairpin queue support beilei.xing
` (9 preceding siblings ...)
2023-06-06 10:03 ` [PATCH v10 09/14] net/cpfl: support hairpin queue start/stop beilei.xing
@ 2023-06-06 10:03 ` beilei.xing
2023-06-06 10:03 ` [PATCH v10 11/14] net/cpfl: enable write back based on ITR expire beilei.xing
` (3 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-06 10:03 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch adds the idpf_vport_irq_map_config_by_qids API.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_device.c | 75 ++++++++++++++++++++++++
drivers/common/idpf/idpf_common_device.h | 4 ++
drivers/common/idpf/version.map | 1 +
3 files changed, 80 insertions(+)
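Unlike idpf_vport_irq_map_config(), the new API takes an explicit array of absolute hardware queue ids, so a caller can mix regular data queues and hairpin queues when mapping all Rx queues to the single allocated vector. A hedged sketch of such a caller; get_data_hw_qid() and get_hairpin_hw_qid() are hypothetical stand-ins for the driver's own queue-chunk lookups, and MAX_RXQ is an illustrative bound, not a real driver constant:

#include <errno.h>
#include <stdint.h>

#include <idpf_common_device.h>

#define MAX_RXQ 16 /* illustrative bound only */

/* Hypothetical helpers standing in for the driver's chunk-info lookups. */
extern uint32_t get_data_hw_qid(struct idpf_vport *vport, uint16_t idx);
extern uint32_t get_hairpin_hw_qid(struct idpf_vport *vport, uint16_t idx);

/* Illustrative only: map every Rx queue (data queues first, hairpin
 * queues after them) to the vport's single vector by hardware queue id.
 */
static int
map_all_rx_irqs(struct idpf_vport *vport, uint16_t nb_data_rxq,
                uint16_t nb_rx_queues)
{
        uint32_t qids[MAX_RXQ] = {0};
        uint16_t i;

        if (nb_rx_queues > MAX_RXQ)
                return -EINVAL;

        for (i = 0; i < nb_rx_queues; i++)
                qids[i] = (i < nb_data_rxq) ?
                          get_data_hw_qid(vport, i) :
                          get_hairpin_hw_qid(vport, i - nb_data_rxq);

        return idpf_vport_irq_map_config_by_qids(vport, qids, nb_rx_queues);
}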
diff --git a/drivers/common/idpf/idpf_common_device.c b/drivers/common/idpf/idpf_common_device.c
index dc47551b17..cc4207a46e 100644
--- a/drivers/common/idpf/idpf_common_device.c
+++ b/drivers/common/idpf/idpf_common_device.c
@@ -667,6 +667,81 @@ idpf_vport_irq_map_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
return ret;
}
+int
+idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport, uint32_t *qids, uint16_t nb_rx_queues)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_queue_vector *qv_map;
+ struct idpf_hw *hw = &adapter->hw;
+ uint32_t dynctl_val, itrn_val;
+ uint32_t dynctl_reg_start;
+ uint32_t itrn_reg_start;
+ uint16_t i;
+ int ret;
+
+ qv_map = rte_zmalloc("qv_map",
+ nb_rx_queues *
+ sizeof(struct virtchnl2_queue_vector), 0);
+ if (qv_map == NULL) {
+ DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
+ nb_rx_queues);
+ ret = -ENOMEM;
+ goto qv_map_alloc_err;
+ }
+
+ /* Rx interrupt disabled, Map interrupt only for writeback */
+
+ /* The capability flags adapter->caps.other_caps should be
+ * compared with bit VIRTCHNL2_CAP_WB_ON_ITR here. The if
+ * condition should be updated when the FW can return the
+ * correct flag bits.
+ */
+ dynctl_reg_start =
+ vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
+ itrn_reg_start =
+ vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
+ dynctl_val = IDPF_READ_REG(hw, dynctl_reg_start);
+ DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x", dynctl_val);
+ itrn_val = IDPF_READ_REG(hw, itrn_reg_start);
+ DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
+ /* Force write-backs by setting WB_ON_ITR bit in DYN_CTL
+ * register. WB_ON_ITR and INTENA are mutually exclusive
+ * bits. Setting WB_ON_ITR bits means TX and RX Descs
+ * are written back based on ITR expiration irrespective
+ * of INTENA setting.
+ */
+ /* TBD: need to tune INTERVAL value for better performance. */
+ itrn_val = (itrn_val == 0) ? IDPF_DFLT_INTERVAL : itrn_val;
+ dynctl_val = VIRTCHNL2_ITR_IDX_0 <<
+ PF_GLINT_DYN_CTL_ITR_INDX_S |
+ PF_GLINT_DYN_CTL_WB_ON_ITR_M |
+ itrn_val << PF_GLINT_DYN_CTL_INTERVAL_S;
+ IDPF_WRITE_REG(hw, dynctl_reg_start, dynctl_val);
+
+ for (i = 0; i < nb_rx_queues; i++) {
+ /* map all queues to the same vector */
+ qv_map[i].queue_id = qids[i];
+ qv_map[i].vector_id =
+ vport->recv_vectors->vchunks.vchunks->start_vector_id;
+ }
+ vport->qv_map = qv_map;
+
+ ret = idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, true);
+ if (ret != 0) {
+ DRV_LOG(ERR, "config interrupt mapping failed");
+ goto config_irq_map_err;
+ }
+
+ return 0;
+
+config_irq_map_err:
+ rte_free(vport->qv_map);
+ vport->qv_map = NULL;
+
+qv_map_alloc_err:
+ return ret;
+}
+
int
idpf_vport_irq_unmap_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
{
diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 112367dae8..f767ea7cec 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -200,5 +200,9 @@ int idpf_vport_info_init(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_info);
__rte_internal
void idpf_vport_stats_update(struct virtchnl2_vport_stats *oes, struct virtchnl2_vport_stats *nes);
+__rte_internal
+int idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport,
+ uint32_t *qids,
+ uint16_t nb_rx_queues);
#endif /* _IDPF_COMMON_DEVICE_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 25624732b0..0729f6b912 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -69,6 +69,7 @@ INTERNAL {
idpf_vport_info_init;
idpf_vport_init;
idpf_vport_irq_map_config;
+ idpf_vport_irq_map_config_by_qids;
idpf_vport_irq_unmap_config;
idpf_vport_rss_config;
idpf_vport_stats_update;
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v10 11/14] net/cpfl: enable write back based on ITR expire
2023-06-06 10:03 ` [PATCH v10 00/14] net/cpfl: add hairpin queue support beilei.xing
` (10 preceding siblings ...)
2023-06-06 10:03 ` [PATCH v10 10/14] common/idpf: add irq map config API beilei.xing
@ 2023-06-06 10:03 ` beilei.xing
2023-06-06 10:03 ` [PATCH v10 12/14] net/cpfl: support peer ports get beilei.xing
` (2 subsequent siblings)
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-06 10:03 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
This patch enables write back based on ITR expire
(WB_ON_ITR) for hairpin queues.
Signed-off-by: Mingxia Liu <mingxia.liu@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 4fb9aef68b..fedf2c1fa1 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -735,11 +735,22 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
static int
cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
{
+ uint32_t qids[CPFL_MAX_P2P_NB_QUEUES + IDPF_DEFAULT_RXQ_NUM] = {0};
struct cpfl_vport *cpfl_vport = dev->data->dev_private;
struct idpf_vport *vport = &cpfl_vport->base;
uint16_t nb_rx_queues = dev->data->nb_rx_queues;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i;
- return idpf_vport_irq_map_config(vport, nb_rx_queues);
+ for (i = 0; i < nb_rx_queues; i++) {
+ cpfl_rxq = dev->data->rx_queues[i];
+ if (cpfl_rxq->hairpin_info.hairpin_q)
+ qids[i] = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid,
+ (i - cpfl_vport->nb_data_rxq));
+ else
+ qids[i] = cpfl_hw_qid_get(vport->chunks_info.rx_start_qid, i);
+ }
+ return idpf_vport_irq_map_config_by_qids(vport, qids, nb_rx_queues);
}
/* Update hairpin_info for dev's tx hairpin queue */
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v10 12/14] net/cpfl: support peer ports get
2023-06-06 10:03 ` [PATCH v10 00/14] net/cpfl: add hairpin queue support beilei.xing
` (11 preceding siblings ...)
2023-06-06 10:03 ` [PATCH v10 11/14] net/cpfl: enable write back based on ITR expire beilei.xing
@ 2023-06-06 10:03 ` beilei.xing
2023-06-06 10:03 ` [PATCH v10 13/14] net/cpfl: support hairpin bind/unbind beilei.xing
2023-06-06 10:03 ` [PATCH v10 14/14] doc: update the doc of CPFL PMD beilei.xing
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-06 10:03 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports getting hairpin peer ports.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 41 ++++++++++++++++++++++++++++++++++
1 file changed, 41 insertions(+)
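This op backs the generic rte_eth_hairpin_get_peer_ports() call, which an application uses to discover which peer ports a port's hairpin Tx or Rx queues are bound to (a positive direction queries the Tx side and returns the peer Rx ports, zero does the opposite). A small usage sketch; the port id and the printing are assumptions for illustration:

#include <stdio.h>

#include <rte_common.h>
#include <rte_ethdev.h>

/* Illustrative only: print the peer Rx ports of a port's hairpin Tx queues. */
static void
show_hairpin_tx_peers(uint16_t port_id)
{
        uint16_t peers[RTE_MAX_ETHPORTS];
        int i, nb;

        /* direction > 0: the current port acts as Tx, peer Rx ports are returned */
        nb = rte_eth_hairpin_get_peer_ports(port_id, peers, RTE_DIM(peers), 1);
        if (nb < 0) {
                printf("port %u: no hairpin peer info (%d)\n", port_id, nb);
                return;
        }
        for (i = 0; i < nb; i++)
                printf("port %u hairpin Tx queue %d -> peer Rx port %u\n",
                       port_id, i, peers[i]);
}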
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index fedf2c1fa1..507287ad9c 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1081,6 +1081,46 @@ cpfl_dev_close(struct rte_eth_dev *dev)
return 0;
}
+static int
+cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
+ size_t len, uint32_t tx)
+{
+ struct cpfl_vport *cpfl_vport =
+ (struct cpfl_vport *)dev->data->dev_private;
+ struct idpf_tx_queue *txq;
+ struct idpf_rx_queue *rxq;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ uint16_t i;
+ uint16_t j = 0;
+
+ if (len <= 0)
+ return -EINVAL;
+
+ if (cpfl_vport->p2p_q_chunks_info == NULL)
+ return -ENOTSUP;
+
+ if (tx > 0) {
+ for (i = cpfl_vport->nb_data_txq, j = 0; i < dev->data->nb_tx_queues; i++, j++) {
+ txq = dev->data->tx_queues[i];
+ if (txq == NULL || j >= len)
+ return -EINVAL;
+ cpfl_txq = (struct cpfl_tx_queue *)txq;
+ peer_ports[j] = cpfl_txq->hairpin_info.peer_rxp;
+ }
+ } else if (tx == 0) {
+ for (i = cpfl_vport->nb_data_rxq, j = 0; i < dev->data->nb_rx_queues; i++, j++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq == NULL || j >= len)
+ return -EINVAL;
+ cpfl_rxq = (struct cpfl_rx_queue *)rxq;
+ peer_ports[j] = cpfl_rxq->hairpin_info.peer_txp;
+ }
+ }
+
+ return j;
+}
+
static const struct eth_dev_ops cpfl_eth_dev_ops = {
.dev_configure = cpfl_dev_configure,
.dev_close = cpfl_dev_close,
@@ -1110,6 +1150,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.hairpin_cap_get = cpfl_hairpin_cap_get,
.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
+ .hairpin_get_peer_ports = cpfl_hairpin_get_peer_ports,
};
static int
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v10 13/14] net/cpfl: support hairpin bind/unbind
2023-06-06 10:03 ` [PATCH v10 00/14] net/cpfl: add hairpin queue support beilei.xing
` (12 preceding siblings ...)
2023-06-06 10:03 ` [PATCH v10 12/14] net/cpfl: support peer ports get beilei.xing
@ 2023-06-06 10:03 ` beilei.xing
2023-06-06 10:03 ` [PATCH v10 14/14] doc: update the doc of CPFL PMD beilei.xing
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-06 10:03 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing, Xiao Wang
From: Beilei Xing <beilei.xing@intel.com>
This patch supports hairpin_bind/unbind ops.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/net/cpfl/cpfl_ethdev.c | 137 +++++++++++++++++++++++++++++++++
1 file changed, 137 insertions(+)
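These ops back the generic rte_eth_hairpin_bind()/rte_eth_hairpin_unbind() calls that an application issues when its hairpin queues were set up with manual binding enabled. A hedged sketch; the port ids are assumptions, and for the single-port hairpin this PMD supports, tx_port and rx_port would typically be the same port:

#include <rte_ethdev.h>

/* Illustrative only: manually bind, then later unbind, the hairpin path
 * from tx_port's hairpin Tx queues to rx_port's hairpin Rx queues.
 */
static int
rebind_hairpin(uint16_t tx_port, uint16_t rx_port)
{
        int ret;

        ret = rte_eth_hairpin_bind(tx_port, rx_port);
        if (ret != 0)
                return ret;

        /* ... traffic runs through the hairpin path here ... */

        return rte_eth_hairpin_unbind(tx_port, rx_port);
}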
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 507287ad9c..e96bf0a82e 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1121,6 +1121,141 @@ cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
return j;
}
+static int
+cpfl_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct idpf_vport *tx_vport = &cpfl_tx_vport->base;
+ struct cpfl_vport *cpfl_rx_vport;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ struct rte_eth_dev *peer_dev;
+ struct idpf_vport *rx_vport;
+ int err = 0;
+ int i;
+
+ err = cpfl_txq_hairpin_info_update(dev, rx_port);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info.");
+ return err;
+ }
+
+ /* configure hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ err = cpfl_hairpin_txq_config(tx_vport, cpfl_txq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+ return err;
+ }
+ }
+
+ err = cpfl_hairpin_tx_complq_config(cpfl_tx_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
+ return err;
+ }
+
+ peer_dev = &rte_eth_devices[rx_port];
+ cpfl_rx_vport = (struct cpfl_vport *)peer_dev->data->dev_private;
+ rx_vport = &cpfl_rx_vport->base;
+ cpfl_rxq_hairpin_mz_bind(peer_dev);
+
+ err = cpfl_hairpin_rx_bufq_config(cpfl_rx_vport);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
+ return err;
+ }
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ err = cpfl_hairpin_rxq_config(rx_vport, cpfl_rxq);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
+ return err;
+ }
+ err = cpfl_rx_queue_init(peer_dev, i);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+ return err;
+ }
+ }
+
+ /* enable hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+ i - cpfl_tx_vport->nb_data_txq,
+ false, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u on",
+ i);
+ return err;
+ }
+ cpfl_txq->base.q_started = true;
+ }
+
+ err = cpfl_switch_hairpin_complq(cpfl_tx_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq");
+ return err;
+ }
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ err = cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+ i - cpfl_rx_vport->nb_data_rxq,
+ true, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u on",
+ i);
+ }
+ cpfl_rxq->base.q_started = true;
+ }
+
+ err = cpfl_switch_hairpin_bufq(cpfl_rx_vport, true);
+ if (err != 0) {
+ PMD_DRV_LOG(ERR, "Failed to switch hairpin Rx buffer queue");
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+cpfl_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+ struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+ struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+ struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+ struct cpfl_tx_queue *cpfl_txq;
+ struct cpfl_rx_queue *cpfl_rxq;
+ int i;
+
+ /* disable hairpin queues */
+ for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+ cpfl_txq = dev->data->tx_queues[i];
+ cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+ i - cpfl_tx_vport->nb_data_txq,
+ false, false);
+ cpfl_txq->base.q_started = false;
+ }
+
+ cpfl_switch_hairpin_complq(cpfl_tx_vport, false);
+
+ for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; i++) {
+ cpfl_rxq = peer_dev->data->rx_queues[i];
+ cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+ i - cpfl_rx_vport->nb_data_rxq,
+ true, false);
+ cpfl_rxq->base.q_started = false;
+ }
+
+ cpfl_switch_hairpin_bufq(cpfl_rx_vport, false);
+
+ return 0;
+}
+
static const struct eth_dev_ops cpfl_eth_dev_ops = {
.dev_configure = cpfl_dev_configure,
.dev_close = cpfl_dev_close,
@@ -1151,6 +1286,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
.hairpin_get_peer_ports = cpfl_hairpin_get_peer_ports,
+ .hairpin_bind = cpfl_hairpin_bind,
+ .hairpin_unbind = cpfl_hairpin_unbind,
};
static int
--
2.26.2
^ permalink raw reply [flat|nested] 164+ messages in thread
* [PATCH v10 14/14] doc: update the doc of CPFL PMD
2023-06-06 10:03 ` [PATCH v10 00/14] net/cpfl: add hairpin queue support beilei.xing
` (13 preceding siblings ...)
2023-06-06 10:03 ` [PATCH v10 13/14] net/cpfl: support hairpin bind/unbind beilei.xing
@ 2023-06-06 10:03 ` beilei.xing
14 siblings, 0 replies; 164+ messages in thread
From: beilei.xing @ 2023-06-06 10:03 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev, mingxia.liu, Beilei Xing
From: Beilei Xing <beilei.xing@intel.com>
Update cpfl.rst to clarify hairpin support.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
doc/guides/nics/cpfl.rst | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index d25db088eb..8d5c3082e4 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -106,3 +106,10 @@ The paths are chosen based on 2 conditions:
A value "P" means the offload feature is not supported by vector path.
If any not supported features are used, cpfl vector PMD is disabled
and the scalar paths are chosen.
+
+Hairpin queue
+~~~~~~~~~~~~~
+
+ E2100 Series can loopback packets from RX port to TX port; this feature is
+ called port-to-port or hairpin.
+ Currently, the PMD only supports single port hairpin.
--
2.26.2
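To make the new documentation section concrete, a hairpin queue on this device is created with the standard ethdev hairpin setup calls before rte_eth_dev_start(). A single-port sketch with one hairpin queue pair; the queue index and descriptor count are illustrative assumptions, not values mandated by the PMD:

#include <rte_ethdev.h>

/* Illustrative only: set up one single-port hairpin queue pair at queue
 * index "qid" (assumed to be the index right after the data queues).
 */
static int
setup_single_port_hairpin(uint16_t port_id, uint16_t qid)
{
        struct rte_eth_hairpin_conf conf = {
                .peer_count = 1,
                .peers[0] = { .port = port_id, .queue = qid },
        };
        int ret;

        ret = rte_eth_rx_hairpin_queue_setup(port_id, qid, 1024, &conf);
        if (ret != 0)
                return ret;

        /* The Tx hairpin queue peers the Rx hairpin queue set up above. */
        return rte_eth_tx_hairpin_queue_setup(port_id, qid, 1024, &conf);
}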
^ permalink raw reply [flat|nested] 164+ messages in thread
* RE: [PATCH v10 00/14] net/cpfl: add hairpin queue support
2023-06-06 6:40 ` Wu, Jingjing
@ 2023-06-07 7:16 ` Zhang, Qi Z
0 siblings, 0 replies; 164+ messages in thread
From: Zhang, Qi Z @ 2023-06-07 7:16 UTC (permalink / raw)
To: Wu, Jingjing, Xing, Beilei; +Cc: dev, Liu, Mingxia
> -----Original Message-----
> From: Wu, Jingjing <jingjing.wu@intel.com>
> Sent: Tuesday, June 6, 2023 2:41 PM
> To: Xing, Beilei <beilei.xing@intel.com>
> Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>
> Subject: RE: [PATCH v10 00/14] net/cpfl: add hairpin queue support
>
>
>
> > -----Original Message-----
> > From: Xing, Beilei <beilei.xing@intel.com>
> > Sent: Tuesday, June 6, 2023 6:03 PM
> > To: Wu, Jingjing <jingjing.wu@intel.com>
> > Cc: dev@dpdk.org; Liu, Mingxia <mingxia.liu@intel.com>; Xing, Beilei
> > <beilei.xing@intel.com>
> > Subject: [PATCH v10 00/14] net/cpfl: add hairpin queue support
> >
> > From: Beilei Xing <beilei.xing@intel.com>
> >
> > This patchset adds hairpin queue support.
> >
> > v2 changes:
> > - change hairpin rx queus configuration sequence.
> > - code refine.
> >
> > v3 changes:
> > - Refine the patchset based on the latest code.
> >
> > v4 change:
> > - Remove hairpin rx buffer queue's sw_ring.
> > - Change hairpin rx queus configuration sequence in cpfl_hairpin_bind
> function.
> > - Refind hairpin queue setup and release.
> >
> > v5 change:
> > - Fix memory leak during queue setup.
> > - Refine hairpin Rxq/Txq start/stop.
> >
> > v6 change:
> > - Add sign-off.
> >
> > v7 change:
> > - Update cpfl.rst
> >
> > v8 change:
> > - Fix Intel-compilation failure.
> >
> > v9 change:
> > - Fix memory leak if fail to init queue group.
> > - Change log level.
> >
> > v10 change:
> > - Avoid accessing out-of-bounds.
> >
> > Beilei Xing (14):
> > net/cpfl: refine structures
> > common/idpf: support queue groups add/delete
> > net/cpfl: add haipin queue group during vport init
> > net/cpfl: support hairpin queue capbility get
> > net/cpfl: support hairpin queue setup and release
> > common/idpf: add queue config API
> > net/cpfl: support hairpin queue configuration
> > common/idpf: add switch queue API
> > net/cpfl: support hairpin queue start/stop
> > common/idpf: add irq map config API
> > net/cpfl: enable write back based on ITR expire
> > net/cpfl: support peer ports get
> > net/cpfl: support hairpin bind/unbind
> > doc: update the doc of CPFL PMD
> >
> > doc/guides/nics/cpfl.rst | 7 +
> > drivers/common/idpf/idpf_common_device.c | 75 ++
> > drivers/common/idpf/idpf_common_device.h | 4 +
> > drivers/common/idpf/idpf_common_virtchnl.c | 138 +++-
> > drivers/common/idpf/idpf_common_virtchnl.h | 18 +
> > drivers/common/idpf/version.map | 6 +
> > drivers/net/cpfl/cpfl_ethdev.c | 613 ++++++++++++++--
> > drivers/net/cpfl/cpfl_ethdev.h | 35 +-
> > drivers/net/cpfl/cpfl_rxtx.c | 789 +++++++++++++++++++--
> > drivers/net/cpfl/cpfl_rxtx.h | 76 ++
> > drivers/net/cpfl/cpfl_rxtx_vec_common.h | 21 +-
> > 11 files changed, 1663 insertions(+), 119 deletions(-)
> >
> > --
> > 2.26.2
>
> Acked-by: Jingjing Wu <jingjing.wu@intel.com>
Applied to dpdk-next-net-intel.
Thanks
Qi
^ permalink raw reply [flat|nested] 164+ messages in thread
end of thread, other threads:[~2023-06-07 7:16 UTC | newest]
Thread overview: 164+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-04-21 6:50 [PATCH 00/10] add hairpin queue support beilei.xing
2023-04-21 6:50 ` [PATCH 01/10] net/cpfl: refine structures beilei.xing
2023-04-21 6:50 ` [PATCH 02/10] net/cpfl: support hairpin queue capbility get beilei.xing
2023-04-21 6:50 ` [PATCH 03/10] common/idpf: support queue groups add/delete beilei.xing
2023-04-24 8:48 ` Liu, Mingxia
2023-04-24 8:49 ` Liu, Mingxia
2023-05-19 5:36 ` Xing, Beilei
2023-04-21 6:50 ` [PATCH 04/10] net/cpfl: add haipin queue group during vpotr init beilei.xing
2023-04-24 8:55 ` Liu, Mingxia
2023-05-19 5:36 ` Xing, Beilei
2023-04-21 6:50 ` [PATCH 05/10] net/cpfl: support hairpin queue setup and release beilei.xing
2023-04-21 6:50 ` [PATCH 06/10] net/cpfl: support hairpin queue configuration beilei.xing
2023-04-24 9:48 ` Liu, Mingxia
2023-05-19 5:43 ` Xing, Beilei
2023-04-21 6:50 ` [PATCH 07/10] net/cpfl: support hairpin queue start/stop beilei.xing
2023-04-21 6:50 ` [PATCH 08/10] net/cpfl: enable write back based on ITR expire beilei.xing
2023-04-21 6:50 ` [PATCH 09/10] net/cpfl: support peer ports get beilei.xing
2023-04-21 6:50 ` [PATCH 10/10] net/cpfl: support hairpin bind/unbind beilei.xing
2023-05-19 5:10 ` [PATCH v2 00/10] add hairpin queue support beilei.xing
2023-05-19 5:10 ` [PATCH v2 01/10] net/cpfl: refine structures beilei.xing
2023-05-19 5:10 ` [PATCH v2 02/10] net/cpfl: support hairpin queue capbility get beilei.xing
2023-05-19 5:10 ` [PATCH v2 03/10] common/idpf: support queue groups add/delete beilei.xing
2023-05-19 5:10 ` [PATCH v2 04/10] net/cpfl: add haipin queue group during vport init beilei.xing
2023-05-19 5:10 ` [PATCH v2 04/10] net/cpfl: add haipin queue group during vpotr init beilei.xing
2023-05-19 5:10 ` [PATCH v2 05/10] net/cpfl: support hairpin queue setup and release beilei.xing
2023-05-19 5:10 ` [PATCH v2 06/10] net/cpfl: support hairpin queue configuration beilei.xing
2023-05-19 5:10 ` [PATCH v2 07/10] net/cpfl: support hairpin queue start/stop beilei.xing
2023-05-19 5:10 ` [PATCH v2 08/10] net/cpfl: enable write back based on ITR expire beilei.xing
2023-05-19 5:10 ` [PATCH v2 09/10] net/cpfl: support peer ports get beilei.xing
2023-05-19 5:10 ` [PATCH v2 10/10] net/cpfl: support hairpin bind/unbind beilei.xing
2023-05-19 7:31 ` [PATCH v3 00/10] net/cpfl: add hairpin queue support beilei.xing
2023-05-19 7:31 ` [PATCH v3 01/10] net/cpfl: refine structures beilei.xing
2023-05-19 7:31 ` [PATCH v3 02/10] net/cpfl: support hairpin queue capbility get beilei.xing
2023-05-24 14:30 ` Wu, Jingjing
2023-05-19 7:31 ` [PATCH v3 03/10] common/idpf: support queue groups add/delete beilei.xing
2023-05-19 7:31 ` [PATCH v3 04/10] net/cpfl: add haipin queue group during vport init beilei.xing
2023-05-24 14:38 ` Wu, Jingjing
2023-05-19 7:31 ` [PATCH v3 05/10] net/cpfl: support hairpin queue setup and release beilei.xing
2023-05-24 9:01 ` Liu, Mingxia
2023-05-26 3:46 ` Xing, Beilei
2023-05-25 3:58 ` Wu, Jingjing
2023-05-26 3:52 ` Xing, Beilei
2023-05-19 7:31 ` [PATCH v3 06/10] net/cpfl: support hairpin queue configuration beilei.xing
2023-05-19 7:31 ` [PATCH v3 07/10] net/cpfl: support hairpin queue start/stop beilei.xing
2023-05-25 4:12 ` Wu, Jingjing
2023-05-19 7:31 ` [PATCH v3 08/10] net/cpfl: enable write back based on ITR expire beilei.xing
2023-05-25 4:17 ` Wu, Jingjing
2023-05-19 7:31 ` [PATCH v3 09/10] net/cpfl: support peer ports get beilei.xing
2023-05-25 5:26 ` Wu, Jingjing
2023-05-19 7:31 ` [PATCH v3 10/10] net/cpfl: support hairpin bind/unbind beilei.xing
2023-05-26 7:38 ` [PATCH v4 00/13] net/cpfl: add hairpin queue support beilei.xing
2023-05-26 7:38 ` [PATCH v4 01/13] net/cpfl: refine structures beilei.xing
2023-05-26 7:38 ` [PATCH v4 02/13] common/idpf: support queue groups add/delete beilei.xing
2023-05-26 7:38 ` [PATCH v4 03/13] net/cpfl: add haipin queue group during vport init beilei.xing
2023-05-26 7:38 ` [PATCH v4 04/13] net/cpfl: support hairpin queue capbility get beilei.xing
2023-05-26 7:38 ` [PATCH v4 05/13] net/cpfl: support hairpin queue setup and release beilei.xing
2023-05-30 2:27 ` Liu, Mingxia
2023-05-30 2:49 ` Liu, Mingxia
2023-05-31 10:53 ` Xing, Beilei
2023-05-26 7:38 ` [PATCH v4 06/13] common/idpf: add queue config API beilei.xing
2023-05-26 7:38 ` [PATCH v4 07/13] net/cpfl: support hairpin queue configuration beilei.xing
2023-05-26 7:38 ` [PATCH v4 08/13] common/idpf: add switch queue API beilei.xing
2023-05-26 7:38 ` [PATCH v4 09/13] net/cpfl: support hairpin queue start/stop beilei.xing
2023-05-30 3:30 ` Liu, Mingxia
2023-05-31 10:53 ` Xing, Beilei
2023-05-26 7:38 ` [PATCH v4 10/13] common/idpf: add irq map config API beilei.xing
2023-05-26 7:38 ` [PATCH v4 11/13] net/cpfl: enable write back based on ITR expire beilei.xing
2023-05-26 7:38 ` [PATCH v4 12/13] net/cpfl: support peer ports get beilei.xing
2023-05-26 7:38 ` [PATCH v4 13/13] net/cpfl: support hairpin bind/unbind beilei.xing
2023-05-30 3:59 ` Liu, Mingxia
2023-05-31 10:54 ` Xing, Beilei
2023-05-31 10:18 ` [PATCH v5 00/13] net/cpfl: add hairpin queue support beilei.xing
2023-05-31 10:18 ` [PATCH v5 01/13] net/cpfl: refine structures beilei.xing
2023-05-31 10:18 ` [PATCH v5 02/13] common/idpf: support queue groups add/delete beilei.xing
2023-05-31 10:18 ` [PATCH v5 03/13] net/cpfl: add haipin queue group during vport init beilei.xing
2023-05-31 10:18 ` [PATCH v5 04/13] net/cpfl: support hairpin queue capbility get beilei.xing
2023-05-31 10:18 ` [PATCH v5 05/13] net/cpfl: support hairpin queue setup and release beilei.xing
2023-05-31 10:18 ` [PATCH v5 06/13] common/idpf: add queue config API beilei.xing
2023-05-31 10:18 ` [PATCH v5 07/13] net/cpfl: support hairpin queue configuration beilei.xing
2023-05-31 10:18 ` [PATCH v5 08/13] common/idpf: add switch queue API beilei.xing
2023-05-31 10:18 ` [PATCH v5 09/13] net/cpfl: support hairpin queue start/stop beilei.xing
2023-05-31 10:18 ` [PATCH v5 10/13] common/idpf: add irq map config API beilei.xing
2023-05-31 10:18 ` [PATCH v5 11/13] net/cpfl: enable write back based on ITR expire beilei.xing
2023-05-31 10:18 ` [PATCH v5 12/13] net/cpfl: support peer ports get beilei.xing
2023-05-31 10:18 ` [PATCH v5 13/13] net/cpfl: support hairpin bind/unbind beilei.xing
2023-05-31 10:25 ` [PATCH v6 00/13] net/cpfl: add hairpin queue support beilei.xing
2023-05-31 10:25 ` [PATCH v6 01/13] net/cpfl: refine structures beilei.xing
2023-05-31 10:25 ` [PATCH v6 02/13] common/idpf: support queue groups add/delete beilei.xing
2023-05-31 10:25 ` [PATCH v6 03/13] net/cpfl: add haipin queue group during vport init beilei.xing
2023-05-31 10:25 ` [PATCH v6 04/13] net/cpfl: support hairpin queue capbility get beilei.xing
2023-05-31 10:25 ` [PATCH v6 05/13] net/cpfl: support hairpin queue setup and release beilei.xing
2023-05-31 10:25 ` [PATCH v6 06/13] common/idpf: add queue config API beilei.xing
2023-05-31 10:25 ` [PATCH v6 07/13] net/cpfl: support hairpin queue configuration beilei.xing
2023-05-31 10:25 ` [PATCH v6 08/13] common/idpf: add switch queue API beilei.xing
2023-05-31 10:25 ` [PATCH v6 09/13] net/cpfl: support hairpin queue start/stop beilei.xing
2023-05-31 10:25 ` [PATCH v6 10/13] common/idpf: add irq map config API beilei.xing
2023-05-31 10:25 ` [PATCH v6 11/13] net/cpfl: enable write back based on ITR expire beilei.xing
2023-05-31 10:25 ` [PATCH v6 12/13] net/cpfl: support peer ports get beilei.xing
2023-05-31 10:25 ` [PATCH v6 13/13] net/cpfl: support hairpin bind/unbind beilei.xing
2023-05-31 13:04 ` [PATCH v7 00/14] net/cpfl: add hairpin queue support beilei.xing
2023-05-31 13:04 ` [PATCH v7 01/14] net/cpfl: refine structures beilei.xing
2023-05-31 13:04 ` [PATCH v7 02/14] common/idpf: support queue groups add/delete beilei.xing
2023-05-31 13:04 ` [PATCH v7 03/14] net/cpfl: add haipin queue group during vport init beilei.xing
2023-05-31 13:04 ` [PATCH v7 04/14] net/cpfl: support hairpin queue capbility get beilei.xing
2023-05-31 13:04 ` [PATCH v7 05/14] net/cpfl: support hairpin queue setup and release beilei.xing
2023-05-31 13:04 ` [PATCH v7 06/14] common/idpf: add queue config API beilei.xing
2023-05-31 13:04 ` [PATCH v7 07/14] net/cpfl: support hairpin queue configuration beilei.xing
2023-05-31 13:04 ` [PATCH v7 08/14] common/idpf: add switch queue API beilei.xing
2023-05-31 13:04 ` [PATCH v7 09/14] net/cpfl: support hairpin queue start/stop beilei.xing
2023-05-31 13:04 ` [PATCH v7 10/14] common/idpf: add irq map config API beilei.xing
2023-05-31 13:04 ` [PATCH v7 11/14] net/cpfl: enable write back based on ITR expire beilei.xing
2023-05-31 13:04 ` [PATCH v7 12/14] net/cpfl: support peer ports get beilei.xing
2023-05-31 13:04 ` [PATCH v7 13/14] net/cpfl: support hairpin bind/unbind beilei.xing
2023-05-31 13:04 ` [PATCH v7 14/14] doc: update the doc of CPFL PMD beilei.xing
2023-06-05 6:17 ` [PATCH v8 00/14] net/cpfl: add hairpin queue support beilei.xing
2023-06-05 6:17 ` [PATCH v8 01/14] net/cpfl: refine structures beilei.xing
2023-06-05 6:17 ` [PATCH v8 02/14] common/idpf: support queue groups add/delete beilei.xing
2023-06-05 6:17 ` [PATCH v8 03/14] net/cpfl: add haipin queue group during vport init beilei.xing
2023-06-05 8:35 ` Wu, Jingjing
2023-06-05 8:53 ` Xing, Beilei
2023-06-05 6:17 ` [PATCH v8 04/14] net/cpfl: support hairpin queue capbility get beilei.xing
2023-06-05 6:17 ` [PATCH v8 05/14] net/cpfl: support hairpin queue setup and release beilei.xing
2023-06-05 6:17 ` [PATCH v8 06/14] common/idpf: add queue config API beilei.xing
2023-06-05 6:17 ` [PATCH v8 07/14] net/cpfl: support hairpin queue configuration beilei.xing
2023-06-05 6:17 ` [PATCH v8 08/14] common/idpf: add switch queue API beilei.xing
2023-06-05 6:17 ` [PATCH v8 09/14] net/cpfl: support hairpin queue start/stop beilei.xing
2023-06-05 6:17 ` [PATCH v8 10/14] common/idpf: add irq map config API beilei.xing
2023-06-05 6:17 ` [PATCH v8 11/14] net/cpfl: enable write back based on ITR expire beilei.xing
2023-06-05 6:17 ` [PATCH v8 12/14] net/cpfl: support peer ports get beilei.xing
2023-06-05 11:22 ` Wu, Jingjing
2023-06-05 6:17 ` [PATCH v8 13/14] net/cpfl: support hairpin bind/unbind beilei.xing
2023-06-05 6:17 ` [PATCH v8 14/14] doc: update the doc of CPFL PMD beilei.xing
2023-06-05 9:06 ` [PATCH v9 00/14] net/cpfl: add hairpin queue support beilei.xing
2023-06-05 9:06 ` [PATCH v9 01/14] net/cpfl: refine structures beilei.xing
2023-06-05 9:06 ` [PATCH v9 02/14] common/idpf: support queue groups add/delete beilei.xing
2023-06-05 9:06 ` [PATCH v9 03/14] net/cpfl: add haipin queue group during vport init beilei.xing
2023-06-05 9:06 ` [PATCH v9 04/14] net/cpfl: support hairpin queue capbility get beilei.xing
2023-06-05 9:06 ` [PATCH v9 05/14] net/cpfl: support hairpin queue setup and release beilei.xing
2023-06-05 9:06 ` [PATCH v9 06/14] common/idpf: add queue config API beilei.xing
2023-06-05 9:06 ` [PATCH v9 07/14] net/cpfl: support hairpin queue configuration beilei.xing
2023-06-05 9:06 ` [PATCH v9 08/14] common/idpf: add switch queue API beilei.xing
2023-06-05 9:06 ` [PATCH v9 09/14] net/cpfl: support hairpin queue start/stop beilei.xing
2023-06-05 9:06 ` [PATCH v9 10/14] common/idpf: add irq map config API beilei.xing
2023-06-05 9:06 ` [PATCH v9 11/14] net/cpfl: enable write back based on ITR expire beilei.xing
2023-06-05 9:06 ` [PATCH v9 12/14] net/cpfl: support peer ports get beilei.xing
2023-06-05 9:06 ` [PATCH v9 13/14] net/cpfl: support hairpin bind/unbind beilei.xing
2023-06-05 9:06 ` [PATCH v9 14/14] doc: update the doc of CPFL PMD beilei.xing
2023-06-06 10:03 ` [PATCH v10 00/14] net/cpfl: add hairpin queue support beilei.xing
2023-06-06 6:40 ` Wu, Jingjing
2023-06-07 7:16 ` Zhang, Qi Z
2023-06-06 10:03 ` [PATCH v10 01/14] net/cpfl: refine structures beilei.xing
2023-06-06 10:03 ` [PATCH v10 02/14] common/idpf: support queue groups add/delete beilei.xing
2023-06-06 10:03 ` [PATCH v10 03/14] net/cpfl: add haipin queue group during vport init beilei.xing
2023-06-06 10:03 ` [PATCH v10 04/14] net/cpfl: support hairpin queue capbility get beilei.xing
2023-06-06 10:03 ` [PATCH v10 05/14] net/cpfl: support hairpin queue setup and release beilei.xing
2023-06-06 10:03 ` [PATCH v10 06/14] common/idpf: add queue config API beilei.xing
2023-06-06 10:03 ` [PATCH v10 07/14] net/cpfl: support hairpin queue configuration beilei.xing
2023-06-06 10:03 ` [PATCH v10 08/14] common/idpf: add switch queue API beilei.xing
2023-06-06 10:03 ` [PATCH v10 09/14] net/cpfl: support hairpin queue start/stop beilei.xing
2023-06-06 10:03 ` [PATCH v10 10/14] common/idpf: add irq map config API beilei.xing
2023-06-06 10:03 ` [PATCH v10 11/14] net/cpfl: enable write back based on ITR expire beilei.xing
2023-06-06 10:03 ` [PATCH v10 12/14] net/cpfl: support peer ports get beilei.xing
2023-06-06 10:03 ` [PATCH v10 13/14] net/cpfl: support hairpin bind/unbind beilei.xing
2023-06-06 10:03 ` [PATCH v10 14/14] doc: update the doc of CPFL PMD beilei.xing