DPDK patches and discussions
From: Lance Richardson <lance.richardson@broadcom.com>
To: dev@dpdk.org
Cc: ajit.khaparde@broadcom.com, somnath.kotur@broadcom.com,
	ferruh.yigit@intel.com, thomas@monjalon.net,
	Lance Richardson <lance.richardson@broadcom.com>
Subject: [dpdk-dev] [PATCH] net/bnxt: use dedicated cpr for async events
Date: Wed, 24 Jul 2019 12:14:29 -0400
Message-ID: <20190724161429.11946-1-lance.richardson@broadcom.com>
In-Reply-To: <20190718033616.37605-10-ajit.khaparde@broadcom.com>

This commit enables the creation of a dedicated completion
ring for asynchronous event handling instead of handling these
events on a receive completion ring.
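
In outline, the interrupt handler now drains the dedicated ring and
re-arms its doorbell (a simplified sketch of bnxt_int_handler() from
the diff below; NULL checks omitted):

    struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
    uint32_t raw_cons = cpr->cp_raw_cons;
    struct cmpl_base *cmp;
    uint32_t cons;

    do {
            /* Stop when the next entry has not been written by hw yet. */
            cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
            cmp = &cpr->cp_desc_ring[cons];
            if (!CMP_VALID(cmp, raw_cons, cpr->cp_ring_struct))
                    break;
            /* Dispatch the async event / forwarded HWRM response. */
            bnxt_event_hwrm_resp_handler(bp, cmp);
            raw_cons = NEXT_RAW_CMP(raw_cons);
    } while (1);

    cpr->cp_raw_cons = raw_cons;
    if (BNXT_HAS_NQ(bp))
            bnxt_db_nq_arm(cpr);    /* Thor: re-arm the notification queue */
    else
            B_CP_DB_REARM(cpr, cpr->cp_raw_cons);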

For the Stingray platform and other platforms needing tighter
control of resource utilization, we retain the ability to
process async events on a receive completion ring. This behavior
is controlled at compile time.
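
The gating macro added to bnxt.h (first hunk below) evaluates to the
number of dedicated async completion rings to reserve; when it is
zero, the new allocation paths are no-ops and the first RX completion
ring keeps handling async events as before:

    #ifdef RTE_ARCH_ARM64
    /* Stingray (ARM64) builds keep async events on an RX completion ring. */
    #define BNXT_NUM_ASYNC_CPR(bp) (BNXT_STINGRAY(bp) ? 0 : 1)
    #else
    #define BNXT_NUM_ASYNC_CPR(bp) 1
    #endif

    /* e.g. at the top of bnxt_alloc_async_cp_ring(): */
    if (BNXT_NUM_ASYNC_CPR(bp) == 0)
            return 0;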

For Thor-based adapters, we use a dedicated NQ (notification
queue) ring for async events (async events can't currently
be received on a completion ring due to a firmware limitation).
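
Concretely, the dedicated ring is allocated as an NQ on Thor and as a
regular L2 completion ring otherwise (condensed from
bnxt_alloc_async_cp_ring() in the diff below):

    if (BNXT_HAS_NQ(bp))
            ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;
    else
            ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;

    rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, 0,
                              HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE);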

Rename "def_cp_ring" to "async_cp_ring" to better reflect its
purpose (async event notifications) and to avoid confusion with
VNIC default receive completion rings.

Signed-off-by: Lance Richardson <lance.richardson@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
 drivers/net/bnxt/bnxt.h              |  10 +-
 drivers/net/bnxt/bnxt_ethdev.c       |  19 +++-
 drivers/net/bnxt/bnxt_hwrm.c         |  16 +--
 drivers/net/bnxt/bnxt_hwrm.h         |   2 +
 drivers/net/bnxt/bnxt_irq.c          |  44 +++++---
 drivers/net/bnxt/bnxt_ring.c         | 145 +++++++++++++++++++++++----
 drivers/net/bnxt/bnxt_ring.h         |   3 +
 drivers/net/bnxt/bnxt_rxr.c          |   2 +-
 drivers/net/bnxt/bnxt_rxtx_vec_sse.c |   2 +-
 9 files changed, 197 insertions(+), 46 deletions(-)

diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 93194bb52..0c9f994ea 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -33,6 +33,12 @@
 #define BNXT_MAX_RX_RING_DESC	8192
 #define BNXT_DB_SIZE		0x80
 
+#ifdef RTE_ARCH_ARM64
+#define BNXT_NUM_ASYNC_CPR(bp) (BNXT_STINGRAY(bp) ? 0 : 1)
+#else
+#define BNXT_NUM_ASYNC_CPR(bp) 1
+#endif
+
 /* Chimp Communication Channel */
 #define GRCPF_REG_CHIMP_CHANNEL_OFFSET		0x0
 #define GRCPF_REG_CHIMP_COMM_TRIGGER		0x100
@@ -351,6 +357,7 @@ struct bnxt {
 #define BNXT_FLAG_TRUSTED_VF_EN	(1 << 11)
 #define BNXT_FLAG_DFLT_VNIC_SET	(1 << 12)
 #define BNXT_FLAG_THOR_CHIP	(1 << 13)
+#define BNXT_FLAG_STINGRAY	(1 << 14)
 #define BNXT_FLAG_EXT_STATS_SUPPORTED	(1 << 29)
 #define BNXT_FLAG_NEW_RM	(1 << 30)
 #define BNXT_FLAG_INIT_DONE	(1U << 31)
@@ -363,6 +370,7 @@ struct bnxt {
 #define BNXT_USE_KONG(bp)	((bp)->flags & BNXT_FLAG_KONG_MB_EN)
 #define BNXT_VF_IS_TRUSTED(bp)	((bp)->flags & BNXT_FLAG_TRUSTED_VF_EN)
 #define BNXT_CHIP_THOR(bp)	((bp)->flags & BNXT_FLAG_THOR_CHIP)
+#define BNXT_STINGRAY(bp)	((bp)->flags & BNXT_FLAG_STINGRAY)
 #define BNXT_HAS_NQ(bp)		BNXT_CHIP_THOR(bp)
 #define BNXT_HAS_RING_GRPS(bp)	(!BNXT_CHIP_THOR(bp))
 
@@ -387,7 +395,7 @@ struct bnxt {
 	uint16_t		fw_tx_port_stats_ext_size;
 
 	/* Default completion ring */
-	struct bnxt_cp_ring_info	*def_cp_ring;
+	struct bnxt_cp_ring_info	*async_cp_ring;
 	uint32_t		max_ring_grps;
 	struct bnxt_ring_grp_info	*grp_info;
 
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index ded970644..2a8b50296 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -200,12 +200,17 @@ static void bnxt_free_mem(struct bnxt *bp)
 	bnxt_free_stats(bp);
 	bnxt_free_tx_rings(bp);
 	bnxt_free_rx_rings(bp);
+	bnxt_free_async_cp_ring(bp);
 }
 
 static int bnxt_alloc_mem(struct bnxt *bp)
 {
 	int rc;
 
+	rc = bnxt_alloc_async_ring_struct(bp);
+	if (rc)
+		goto alloc_mem_err;
+
 	rc = bnxt_alloc_vnic_mem(bp);
 	if (rc)
 		goto alloc_mem_err;
@@ -218,6 +223,10 @@ static int bnxt_alloc_mem(struct bnxt *bp)
 	if (rc)
 		goto alloc_mem_err;
 
+	rc = bnxt_alloc_async_cp_ring(bp);
+	if (rc)
+		goto alloc_mem_err;
+
 	return 0;
 
 alloc_mem_err:
@@ -617,8 +626,8 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
 	/* Inherit new configurations */
 	if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
 	    eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
-	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
-	    bp->max_cp_rings ||
+	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues
+		+ BNXT_NUM_ASYNC_CPR(bp) > bp->max_cp_rings ||
 	    eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
 	    bp->max_stat_ctx)
 		goto resource_error;
@@ -3802,6 +3811,12 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
 	    pci_dev->id.device_id == BROADCOM_DEV_ID_57500_VF2)
 		bp->flags |= BNXT_FLAG_THOR_CHIP;
 
+	if (pci_dev->id.device_id == BROADCOM_DEV_ID_58802 ||
+	    pci_dev->id.device_id == BROADCOM_DEV_ID_58804 ||
+	    pci_dev->id.device_id == BROADCOM_DEV_ID_58808 ||
+	    pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF)
+		bp->flags |= BNXT_FLAG_STINGRAY;
+
 	rc = bnxt_init_board(eth_dev);
 	if (rc) {
 		PMD_DRV_LOG(ERR,
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 045ce4a9c..64377473a 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -737,9 +737,12 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
 	req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
 	req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
 					    AGG_RING_MULTIPLIER);
-	req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
+	req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings +
+					     bp->tx_nr_rings +
+					     BNXT_NUM_ASYNC_CPR(bp));
 	req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
-					      bp->tx_nr_rings);
+					      bp->tx_nr_rings +
+					      BNXT_NUM_ASYNC_CPR(bp));
 	req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
 	if (bp->vf_resv_strategy ==
 	    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
@@ -2073,7 +2076,7 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
 	return rc;
 }
 
-static void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
 {
 	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
 
@@ -2083,9 +2086,10 @@ static void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
 	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
 				     sizeof(*cpr->cp_desc_ring));
 	cpr->cp_raw_cons = 0;
+	cpr->valid = 0;
 }
 
-static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
 {
 	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
 
@@ -3212,7 +3216,7 @@ int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
 	req.enables = rte_cpu_to_le_32(
 			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
 	req.async_event_cr = rte_cpu_to_le_16(
-			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
+			bp->async_cp_ring->cp_ring_struct->fw_ring_id);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
@@ -3232,7 +3236,7 @@ int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
 	req.enables = rte_cpu_to_le_32(
 			HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
 	req.async_event_cr = rte_cpu_to_le_16(
-			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
+			bp->async_cp_ring->cp_ring_struct->fw_ring_id);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
 	HWRM_CHECK_RESULT();
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 37aaa1a9e..c882fc2a1 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -119,6 +119,8 @@ int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp);
 int bnxt_free_all_hwrm_rings(struct bnxt *bp);
 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp);
 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp);
+void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr);
+void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr);
 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic);
 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic);
 void bnxt_free_all_hwrm_resources(struct bnxt *bp);
diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c
index 9016871a2..a22700a0d 100644
--- a/drivers/net/bnxt/bnxt_irq.c
+++ b/drivers/net/bnxt/bnxt_irq.c
@@ -22,7 +22,7 @@ static void bnxt_int_handler(void *param)
 {
 	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
 	struct bnxt *bp = eth_dev->data->dev_private;
-	struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+	struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
 	struct cmpl_base *cmp;
 	uint32_t raw_cons;
 	uint32_t cons;
@@ -43,10 +43,13 @@ static void bnxt_int_handler(void *param)
 
 		bnxt_event_hwrm_resp_handler(bp, cmp);
 		raw_cons = NEXT_RAW_CMP(raw_cons);
-	};
+	}
 
 	cpr->cp_raw_cons = raw_cons;
-	B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
+	if (BNXT_HAS_NQ(bp))
+		bnxt_db_nq_arm(cpr);
+	else
+		B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
 }
 
 int bnxt_free_int(struct bnxt *bp)
@@ -92,19 +95,35 @@ int bnxt_free_int(struct bnxt *bp)
 
 void bnxt_disable_int(struct bnxt *bp)
 {
-	struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+	struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
+
+	if (BNXT_NUM_ASYNC_CPR(bp) == 0)
+		return;
+
+	if (!cpr || !cpr->cp_db.doorbell)
+		return;
 
 	/* Only the default completion ring */
-	if (cpr != NULL && cpr->cp_db.doorbell != NULL)
+	if (BNXT_HAS_NQ(bp))
+		bnxt_db_nq(cpr);
+	else
 		B_CP_DB_DISARM(cpr);
 }
 
 void bnxt_enable_int(struct bnxt *bp)
 {
-	struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+	struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
+
+	if (BNXT_NUM_ASYNC_CPR(bp) == 0)
+		return;
+
+	if (!cpr || !cpr->cp_db.doorbell)
+		return;
 
 	/* Only the default completion ring */
-	if (cpr != NULL && cpr->cp_db.doorbell != NULL)
+	if (BNXT_HAS_NQ(bp))
+		bnxt_db_nq_arm(cpr);
+	else
 		B_CP_DB_ARM(cpr);
 }
 
@@ -112,7 +131,7 @@ int bnxt_setup_int(struct bnxt *bp)
 {
 	uint16_t total_vecs;
 	const int len = sizeof(bp->irq_tbl[0].name);
-	int i, rc = 0;
+	int i;
 
 	/* DPDK host only supports 1 MSI-X vector */
 	total_vecs = 1;
@@ -126,14 +145,11 @@ int bnxt_setup_int(struct bnxt *bp)
 			bp->irq_tbl[i].handler = bnxt_int_handler;
 		}
 	} else {
-		rc = -ENOMEM;
-		goto setup_exit;
+		PMD_DRV_LOG(ERR, "bnxt_irq_tbl setup failed\n");
+		return -ENOMEM;
 	}
-	return 0;
 
-setup_exit:
-	PMD_DRV_LOG(ERR, "bnxt_irq_tbl setup failed\n");
-	return rc;
+	return 0;
 }
 
 int bnxt_request_int(struct bnxt *bp)
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index a9952e02c..a5447c04c 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -5,6 +5,7 @@
 
 #include <rte_bitmap.h>
 #include <rte_memzone.h>
+#include <rte_malloc.h>
 #include <unistd.h>
 
 #include "bnxt.h"
@@ -369,6 +370,7 @@ static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
 {
 	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
 	uint32_t nq_ring_id = HWRM_NA_SIGNATURE;
+	int cp_ring_index = queue_index + BNXT_NUM_ASYNC_CPR(bp);
 	uint8_t ring_type;
 	int rc = 0;
 
@@ -383,13 +385,13 @@ static int bnxt_alloc_cmpl_ring(struct bnxt *bp, int queue_index,
 		}
 	}
 
-	rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, queue_index,
+	rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, cp_ring_index,
 				  HWRM_NA_SIGNATURE, nq_ring_id);
 	if (rc)
 		return rc;
 
 	cpr->cp_cons = 0;
-	bnxt_set_db(bp, &cpr->cp_db, ring_type, queue_index,
+	bnxt_set_db(bp, &cpr->cp_db, ring_type, cp_ring_index,
 		    cp_ring->fw_ring_id);
 	bnxt_db_cq(cpr);
 
@@ -400,6 +402,7 @@ static int bnxt_alloc_nq_ring(struct bnxt *bp, int queue_index,
 			      struct bnxt_cp_ring_info *nqr)
 {
 	struct bnxt_ring *nq_ring = nqr->cp_ring_struct;
+	int nq_ring_index = queue_index + BNXT_NUM_ASYNC_CPR(bp);
 	uint8_t ring_type;
 	int rc = 0;
 
@@ -408,12 +411,12 @@ static int bnxt_alloc_nq_ring(struct bnxt *bp, int queue_index,
 
 	ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;
 
-	rc = bnxt_hwrm_ring_alloc(bp, nq_ring, ring_type, queue_index,
+	rc = bnxt_hwrm_ring_alloc(bp, nq_ring, ring_type, nq_ring_index,
 				  HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE);
 	if (rc)
 		return rc;
 
-	bnxt_set_db(bp, &nqr->cp_db, ring_type, queue_index,
+	bnxt_set_db(bp, &nqr->cp_db, ring_type, nq_ring_index,
 		    nq_ring->fw_ring_id);
 	bnxt_db_nq(nqr);
 
@@ -490,14 +493,16 @@ int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
 	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
 	struct bnxt_cp_ring_info *nqr = rxq->nq_ring;
 	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
-	int rc = 0;
+	int rc;
 
 	if (BNXT_HAS_NQ(bp)) {
-		if (bnxt_alloc_nq_ring(bp, queue_index, nqr))
+		rc = bnxt_alloc_nq_ring(bp, queue_index, nqr);
+		if (rc)
 			goto err_out;
 	}
 
-	if (bnxt_alloc_cmpl_ring(bp, queue_index, cpr, nqr))
+	rc = bnxt_alloc_cmpl_ring(bp, queue_index, cpr, nqr);
+	if (rc)
 		goto err_out;
 
 	if (BNXT_HAS_RING_GRPS(bp)) {
@@ -505,22 +510,24 @@ int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
 		bp->grp_info[queue_index].cp_fw_ring_id = cp_ring->fw_ring_id;
 	}
 
-	if (!queue_index) {
+	if (!BNXT_NUM_ASYNC_CPR(bp) && !queue_index) {
 		/*
-		 * In order to save completion resources, use the first
-		 * completion ring from PF or VF as the default completion ring
-		 * for async event and HWRM forward response handling.
+		 * If a dedicated async event completion ring is not enabled,
+		 * use the first completion ring from PF or VF as the default
+		 * completion ring for async event handling.
 		 */
-		bp->def_cp_ring = cpr;
+		bp->async_cp_ring = cpr;
 		rc = bnxt_hwrm_set_async_event_cr(bp);
 		if (rc)
 			goto err_out;
 	}
 
-	if (bnxt_alloc_rx_ring(bp, queue_index))
+	rc = bnxt_alloc_rx_ring(bp, queue_index);
+	if (rc)
 		goto err_out;
 
-	if (bnxt_alloc_rx_agg_ring(bp, queue_index))
+	rc = bnxt_alloc_rx_agg_ring(bp, queue_index);
+	if (rc)
 		goto err_out;
 
 	rxq->rx_buf_use_size = BNXT_MAX_MTU + RTE_ETHER_HDR_LEN +
@@ -545,6 +552,9 @@ int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
 		    bp->eth_dev->data->rx_queue_state[queue_index]);
 
 err_out:
+	PMD_DRV_LOG(ERR,
+		    "Failed to allocate receive queue %d, rc %d.\n",
+		    queue_index, rc);
 	return rc;
 }
 
@@ -583,15 +593,13 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
 		}
 
 		bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
-
-		if (!i) {
+		if (!BNXT_NUM_ASYNC_CPR(bp) && !i) {
 			/*
-			 * In order to save completion resource, use the first
-			 * completion ring from PF or VF as the default
-			 * completion ring for async event & HWRM
-			 * forward response handling.
+			 * If a dedicated async event completion ring is not
+			 * enabled, use the first completion ring as the default
+			 * completion ring for async event handling.
 			 */
-			bp->def_cp_ring = cpr;
+			bp->async_cp_ring = cpr;
 			rc = bnxt_hwrm_set_async_event_cr(bp);
 			if (rc)
 				goto err_out;
@@ -652,3 +660,98 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
 err_out:
 	return rc;
 }
+
+/* Allocate dedicated async completion ring. */
+int bnxt_alloc_async_cp_ring(struct bnxt *bp)
+{
+	struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
+	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+	uint8_t ring_type;
+	int rc;
+
+	if (BNXT_NUM_ASYNC_CPR(bp) == 0)
+		return 0;
+
+	if (BNXT_HAS_NQ(bp))
+		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ;
+	else
+		ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL;
+
+	rc = bnxt_hwrm_ring_alloc(bp, cp_ring, ring_type, 0,
+				  HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE);
+
+	if (rc)
+		return rc;
+
+	cpr->cp_cons = 0;
+	cpr->valid = 0;
+	bnxt_set_db(bp, &cpr->cp_db, ring_type, 0,
+		    cp_ring->fw_ring_id);
+
+	if (BNXT_HAS_NQ(bp))
+		bnxt_db_nq(cpr);
+	else
+		bnxt_db_cq(cpr);
+
+	return bnxt_hwrm_set_async_event_cr(bp);
+}
+
+/* Free dedicated async completion ring. */
+void bnxt_free_async_cp_ring(struct bnxt *bp)
+{
+	struct bnxt_cp_ring_info *cpr = bp->async_cp_ring;
+
+	if (BNXT_NUM_ASYNC_CPR(bp) == 0 || cpr == NULL)
+		return;
+
+	if (BNXT_HAS_NQ(bp))
+		bnxt_free_nq_ring(bp, cpr);
+	else
+		bnxt_free_cp_ring(bp, cpr);
+
+	bnxt_free_ring(cpr->cp_ring_struct);
+	rte_free(cpr->cp_ring_struct);
+	cpr->cp_ring_struct = NULL;
+	rte_free(cpr);
+	bp->async_cp_ring = NULL;
+}
+
+int bnxt_alloc_async_ring_struct(struct bnxt *bp)
+{
+	struct bnxt_cp_ring_info *cpr = NULL;
+	struct bnxt_ring *ring = NULL;
+	unsigned int socket_id;
+
+	if (BNXT_NUM_ASYNC_CPR(bp) == 0)
+		return 0;
+
+	socket_id = rte_lcore_to_socket_id(rte_get_master_lcore());
+
+	cpr = rte_zmalloc_socket("cpr",
+				 sizeof(struct bnxt_cp_ring_info),
+				 RTE_CACHE_LINE_SIZE, socket_id);
+	if (cpr == NULL)
+		return -ENOMEM;
+
+	ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
+				  sizeof(struct bnxt_ring),
+				  RTE_CACHE_LINE_SIZE, socket_id);
+	if (ring == NULL) {
+		rte_free(cpr);
+		return -ENOMEM;
+	}
+
+	ring->bd = (void *)cpr->cp_desc_ring;
+	ring->bd_dma = cpr->cp_desc_mapping;
+	ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
+	ring->ring_mask = ring->ring_size - 1;
+	ring->vmem_size = 0;
+	ring->vmem = NULL;
+
+	bp->async_cp_ring = cpr;
+	cpr->cp_ring_struct = ring;
+
+	return bnxt_alloc_rings(bp, 0, NULL, NULL,
+				bp->async_cp_ring, NULL,
+				"def_cp");
+}
diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h
index e5cef3a1d..04c7b04b8 100644
--- a/drivers/net/bnxt/bnxt_ring.h
+++ b/drivers/net/bnxt/bnxt_ring.h
@@ -75,6 +75,9 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
 			    const char *suffix);
 int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index);
 int bnxt_alloc_hwrm_rings(struct bnxt *bp);
+int bnxt_alloc_async_cp_ring(struct bnxt *bp);
+void bnxt_free_async_cp_ring(struct bnxt *bp);
+int bnxt_alloc_async_ring_struct(struct bnxt *bp);
 
 static inline void bnxt_db_write(struct bnxt_db_info *db, uint32_t idx)
 {
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 54a2cf5fd..185a0e376 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -564,7 +564,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 				nb_rx_pkts++;
 			if (rc == -EBUSY)	/* partial completion */
 				break;
-		} else {
+		} else if (!BNXT_NUM_ASYNC_CPR(rxq->bp)) {
 			evt =
 			bnxt_event_hwrm_resp_handler(rxq->bp,
 						     (struct cmpl_base *)rxcmp);
diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
index c358506f8..adc5020ec 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_sse.c
@@ -257,7 +257,7 @@ bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 			mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);
 
 			rx_pkts[nb_rx_pkts++] = mbuf;
-		} else {
+		} else if (!BNXT_NUM_ASYNC_CPR(rxq->bp)) {
 			evt =
 			bnxt_event_hwrm_resp_handler(rxq->bp,
 						     (struct cmpl_base *)rxcmp);
-- 
2.17.1


Thread overview: 38+ messages
2019-07-18  3:35 [dpdk-dev] [PATCH 00/22] bnxt patchset Ajit Khaparde
2019-07-18  3:35 ` [dpdk-dev] [PATCH 01/22] net/bnxt: fix to handle error case during port start Ajit Khaparde
2019-07-18  3:35 ` [dpdk-dev] [PATCH 02/22] net/bnxt: fix return value check of address mapping Ajit Khaparde
2019-07-18  3:35 ` [dpdk-dev] [PATCH 03/22] net/bnxt: fix failure to add a MAC address Ajit Khaparde
2019-07-18  3:35 ` [dpdk-dev] [PATCH 04/22] net/bnxt: fix an unconditional wait in link update Ajit Khaparde
2019-07-18  3:35 ` [dpdk-dev] [PATCH 05/22] net/bnxt: fix setting primary MAC address Ajit Khaparde
2019-07-18  3:36 ` [dpdk-dev] [PATCH 06/22] net/bnxt: fix failure path in dev init Ajit Khaparde
2019-07-18  3:36 ` [dpdk-dev] [PATCH 07/22] net/bnxt: reset filters before registering interrupts Ajit Khaparde
2019-07-18  3:36 ` [dpdk-dev] [PATCH 08/22] net/bnxt: use correct vnic default completion ring Ajit Khaparde
2019-07-18  3:36 ` [dpdk-dev] [PATCH 09/22] net/bnxt: use dedicated cpr for async events Ajit Khaparde
2019-07-22 14:57   ` Ferruh Yigit
2019-07-22 15:06     ` Thomas Monjalon
2019-07-22 17:57       ` Lance Richardson
2019-07-22 18:34         ` Ferruh Yigit
2019-07-23  8:04           ` Thomas Monjalon
2019-07-23 10:53             ` Lance Richardson
2019-07-23 21:27           ` Lance Richardson
2019-07-24 16:14   ` Lance Richardson [this message]
2019-07-24 16:32     ` [dpdk-dev] [PATCH] " Lance Richardson
2019-07-24 16:49     ` [dpdk-dev] [[PATCH v2]] " Lance Richardson
2019-07-25  9:54       ` Ferruh Yigit
2019-07-18  3:36 ` [dpdk-dev] [PATCH 10/22] net/bnxt: retry irq callback deregistration Ajit Khaparde
2019-07-18  3:36 ` [dpdk-dev] [PATCH 11/22] net/bnxt: fix error checking of FW commands Ajit Khaparde
2019-07-18  3:36 ` [dpdk-dev] [PATCH 12/22] net/bnxt: fix to return standard error codes Ajit Khaparde
2019-07-18  3:36 ` [dpdk-dev] [PATCH 13/22] net/bnxt: fix lock release on getting NVM info Ajit Khaparde
2019-07-18  3:36 ` [dpdk-dev] [PATCH 14/22] net/bnxt: fix RSS disable issue for thor-based adapters Ajit Khaparde
2019-07-18  3:36 ` [dpdk-dev] [PATCH 15/22] net/bnxt: use correct RSS table sizes Ajit Khaparde
2019-07-18  3:36 ` [dpdk-dev] [PATCH 16/22] net/bnxt: fully initialize hwrm msgs for thor RSS cfg Ajit Khaparde
2019-07-18  3:36 ` [dpdk-dev] [PATCH 17/22] net/bnxt: use correct number of RSS contexts for thor Ajit Khaparde
2019-07-18  3:36 ` [dpdk-dev] [PATCH 18/22] net/bnxt: pass correct RSS table address " Ajit Khaparde
2019-07-18  3:36 ` [dpdk-dev] [PATCH 19/22] net/bnxt: avoid overrun in get statistics Ajit Khaparde
2019-07-18  3:36 ` [dpdk-dev] [PATCH 20/22] net/bnxt: fix MAC/VLAN filter allocation failure Ajit Khaparde
2019-07-18  3:36 ` [dpdk-dev] [PATCH 21/22] net/bnxt: fix to correctly check result of HWRM command Ajit Khaparde
2019-07-18  3:36 ` [dpdk-dev] [PATCH 22/22] net/bnxt: update HWRM API to version 1.10.0.91 Ajit Khaparde
2019-07-19 12:30 ` [dpdk-dev] [PATCH 00/22] bnxt patchset Ferruh Yigit
2019-07-19 13:22   ` Ajit Kumar Khaparde
2019-07-19 16:59     ` Ferruh Yigit
2019-07-19 21:01 ` Ferruh Yigit
