Subject: [PATCH] net/nfp: fix invalid control message packets
From: Chaoyong He @ 2023-07-13  3:02 UTC
  To: dev; +Cc: oss-drivers, niklas.soderlund, Long Wu, chaoyong.he, stable

From: Long Wu <long.wu@corigine.com>

If we add two cards that use the flower firmware to one dpdk-testpmd
instance, the NFP PMD logs errors. The reason is that the second card
uses the control vNIC Rx queue of the first card.

rte_eth_dma_zone_reserve() reserves a new DMA zone only if the zone's
name is unique; if a zone with the same name already exists, it
returns a pointer to the previously reserved DMA zone instead. We try
to reserve a DMA zone for each card, but we use the same name for
every reservation.
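As a minimal sketch of the collision (dev0, dev1, ring_size and
numa_node are hypothetical stand-ins; only rte_eth_dma_zone_reserve()
and its return-the-existing-zone behavior come from ethdev; both ctrl
vNIC eth_devs here report the same port and queue ids, which is what
makes the derived memzone names identical):

	const struct rte_memzone *tz0, *tz1;

	/* First card: no memzone with the derived name exists yet,
	 * so a new DMA zone is reserved.
	 */
	tz0 = rte_eth_dma_zone_reserve(dev0, "ctrl_rx_ring", 0,
			ring_size, NFP_MEMZONE_ALIGN, numa_node);

	/* Second card: the name components (ring name, port id,
	 * queue id) are identical, so the existing zone is returned
	 * instead of a fresh reservation -- tz1 == tz0, and the
	 * second card ends up on the first card's ctrl vNIC Rx ring.
	 */
	tz1 = rte_eth_dma_zone_reserve(dev1, "ctrl_rx_ring", 0,
			ring_size, NFP_MEMZONE_ALIGN, numa_node);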

Use the PCI address to give each control vNIC a unique ring name,
which avoids the collision above and lets each NIC's rings have
their own DMA zones.
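Concretely (the address 0000:01:00.0 below is only an example), the
renaming in the patch works like this:

	/* "0000:01:00.0" -> "01:00.0": skip the PCI domain so the
	 * name fits into RTE_MEMZONE_NAMESIZE (32 bytes).
	 */
	const char *pci_name = strchr(pf_dev->pci_dev->name, ':') + 1;
	char ctrl_rxring_name[RTE_MEMZONE_NAMESIZE];

	snprintf(ctrl_rxring_name, sizeof(ctrl_rxring_name),
			"%s_ctrx_ring", pci_name);
	/* Yields e.g. "01:00.0_ctrx_ring"; a second card has a
	 * different PCI address, so its rings reserve distinct
	 * DMA zones.
	 */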

Fixes: 945441ebdb9c ("net/nfp: add flower ctrl VNIC")
Cc: chaoyong.he@corigine.com
Cc: stable@dpdk.org

Signed-off-by: Long Wu <long.wu@corigine.com>
---
 drivers/net/nfp/flower/nfp_flower.c | 32 +++++++++++++++++++++--------
 1 file changed, 23 insertions(+), 9 deletions(-)

diff --git a/drivers/net/nfp/flower/nfp_flower.c b/drivers/net/nfp/flower/nfp_flower.c
index 77dab864f3..9a75aa5413 100644
--- a/drivers/net/nfp/flower/nfp_flower.c
+++ b/drivers/net/nfp/flower/nfp_flower.c
@@ -385,6 +385,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 	int ret = 0;
 	uint16_t n_txq;
 	uint16_t n_rxq;
+	const char *pci_name;
 	unsigned int numa_node;
 	struct rte_mempool *mp;
 	struct nfp_net_rxq *rxq;
@@ -393,6 +394,8 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 	struct rte_eth_dev *eth_dev;
 	const struct rte_memzone *tz;
 	struct nfp_app_fw_flower *app_fw_flower;
+	char ctrl_rxring_name[RTE_MEMZONE_NAMESIZE];
+	char ctrl_txring_name[RTE_MEMZONE_NAMESIZE];
 	char ctrl_pktmbuf_pool_name[RTE_MEMZONE_NAMESIZE];
 
 	/* Set up some pointers here for ease of use */
@@ -425,10 +428,12 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 		goto eth_dev_cleanup;
 	}
 
+	pci_name = strchr(pf_dev->pci_dev->name, ':') + 1;
+
 	/* Create a mbuf pool for the ctrl vNIC */
 	numa_node = rte_socket_id();
 	snprintf(ctrl_pktmbuf_pool_name, sizeof(ctrl_pktmbuf_pool_name),
-			"%s_ctrlmp", (strchr(pf_dev->pci_dev->name, ':') + 1));
+			"%s_ctrlmp", pci_name);
 	app_fw_flower->ctrl_pktmbuf_pool =
 			rte_pktmbuf_pool_create(ctrl_pktmbuf_pool_name,
 			4 * CTRL_VNIC_NB_DESC, 64, 0, 9216, numa_node);
@@ -467,6 +472,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 	eth_dev->data->nb_rx_queues = n_txq;
 	eth_dev->data->dev_private = hw;
 
+	snprintf(ctrl_rxring_name, sizeof(ctrl_rxring_name), "%s_ctrx_ring", pci_name);
 	/* Set up the Rx queues */
 	for (i = 0; i < n_rxq; i++) {
 		rxq = rte_zmalloc_socket("ethdev RX queue",
@@ -502,7 +508,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 		 * handle the maximum ring size is allocated in order to allow for
 		 * resizing in later calls to the queue setup function.
 		 */
-		tz = rte_eth_dma_zone_reserve(eth_dev, "ctrl_rx_ring", i,
+		tz = rte_eth_dma_zone_reserve(eth_dev, ctrl_rxring_name, i,
 				sizeof(struct nfp_net_rx_desc) * NFP_NET_MAX_RX_DESC,
 				NFP_MEMZONE_ALIGN, numa_node);
 		if (tz == NULL) {
@@ -521,7 +527,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 				sizeof(*rxq->rxbufs) * CTRL_VNIC_NB_DESC,
 				RTE_CACHE_LINE_SIZE, numa_node);
 		if (rxq->rxbufs == NULL) {
-			rte_eth_dma_zone_free(eth_dev, "ctrl_rx_ring", i);
+			rte_eth_dma_zone_free(eth_dev, ctrl_rxring_name, i);
 			rte_free(rxq);
 			ret = -ENOMEM;
 			goto rx_queue_setup_cleanup;
@@ -539,6 +545,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 		nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(i), rte_log2_u32(CTRL_VNIC_NB_DESC));
 	}
 
+	snprintf(ctrl_txring_name, sizeof(ctrl_txring_name), "%s_cttx_ring", pci_name);
 	/* Set up the Tx queues */
 	for (i = 0; i < n_txq; i++) {
 		txq = rte_zmalloc_socket("ethdev TX queue",
@@ -557,7 +564,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 		 * handle the maximum ring size is allocated in order to allow for
 		 * resizing in later calls to the queue setup function.
 		 */
-		tz = rte_eth_dma_zone_reserve(eth_dev, "ctrl_tx_ring", i,
+		tz = rte_eth_dma_zone_reserve(eth_dev, ctrl_txring_name, i,
 				sizeof(struct nfp_net_nfd3_tx_desc) * NFP_NET_MAX_TX_DESC,
 				NFP_MEMZONE_ALIGN, numa_node);
 		if (tz == NULL) {
@@ -584,7 +591,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 				sizeof(*txq->txbufs) * CTRL_VNIC_NB_DESC,
 				RTE_CACHE_LINE_SIZE, numa_node);
 		if (txq->txbufs == NULL) {
-			rte_eth_dma_zone_free(eth_dev, "ctrl_tx_ring", i);
+			rte_eth_dma_zone_free(eth_dev, ctrl_txring_name, i);
 			rte_free(txq);
 			ret = -ENOMEM;
 			goto tx_queue_setup_cleanup;
@@ -609,7 +616,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 		txq = eth_dev->data->tx_queues[i];
 		if (txq != NULL) {
 			rte_free(txq->txbufs);
-			rte_eth_dma_zone_free(eth_dev, "ctrl_tx_ring", i);
+			rte_eth_dma_zone_free(eth_dev, ctrl_txring_name, i);
 			rte_free(txq);
 		}
 	}
@@ -618,7 +625,7 @@ nfp_flower_init_ctrl_vnic(struct nfp_net_hw *hw)
 		rxq = eth_dev->data->rx_queues[i];
 		if (rxq != NULL) {
 			rte_free(rxq->rxbufs);
-			rte_eth_dma_zone_free(eth_dev, "ctrl_rx_ring", i);
+			rte_eth_dma_zone_free(eth_dev, ctrl_rxring_name, i);
 			rte_free(rxq);
 		}
 	}
@@ -639,28 +646,35 @@ static void
 nfp_flower_cleanup_ctrl_vnic(struct nfp_net_hw *hw)
 {
 	uint32_t i;
+	const char *pci_name;
 	struct nfp_net_rxq *rxq;
 	struct nfp_net_txq *txq;
 	struct rte_eth_dev *eth_dev;
 	struct nfp_app_fw_flower *app_fw_flower;
+	char ctrl_txring_name[RTE_MEMZONE_NAMESIZE];
+	char ctrl_rxring_name[RTE_MEMZONE_NAMESIZE];
 
 	eth_dev = hw->eth_dev;
 	app_fw_flower = NFP_PRIV_TO_APP_FW_FLOWER(hw->pf_dev->app_fw_priv);
 
+	pci_name = strchr(app_fw_flower->pf_hw->pf_dev->pci_dev->name, ':') + 1;
+
+	snprintf(ctrl_txring_name, sizeof(ctrl_txring_name), "%s_cttx_ring", pci_name);
 	for (i = 0; i < hw->max_tx_queues; i++) {
 		txq = eth_dev->data->tx_queues[i];
 		if (txq != NULL) {
 			rte_free(txq->txbufs);
-			rte_eth_dma_zone_free(eth_dev, "ctrl_tx_ring", i);
+			rte_eth_dma_zone_free(eth_dev, ctrl_txring_name, i);
 			rte_free(txq);
 		}
 	}
 
+	snprintf(ctrl_rxring_name, sizeof(ctrl_rxring_name), "%s_ctrx_ring", pci_name);
 	for (i = 0; i < hw->max_rx_queues; i++) {
 		rxq = eth_dev->data->rx_queues[i];
 		if (rxq != NULL) {
 			rte_free(rxq->rxbufs);
-			rte_eth_dma_zone_free(eth_dev, "ctrl_rx_ring", i);
+			rte_eth_dma_zone_free(eth_dev, ctrl_rxring_name, i);
 			rte_free(rxq);
 		}
 	}
-- 
2.39.1

