DPDK patches and discussions
 help / color / mirror / Atom feed
From: Ajit Khaparde <ajit.khaparde@broadcom.com>
To: dev@dpdk.org
Cc: ferruh.yigit@intel.com,
	Kalesh AP <kalesh-anakkur.purayil@broadcom.com>,
	Somnath Kotur <somnath.kotur@broadcom.com>
Subject: [dpdk-dev] [PATCH v1 4/9] net/bnxt: fix to allocate flow stat related structs
Date: Fri, 15 May 2020 11:45:37 -0700
Message-ID: <20200515184542.89318-5-ajit.khaparde@broadcom.com> (raw)
In-Reply-To: <20200515184542.89318-1-ajit.khaparde@broadcom.com>

Consolidate flow stat related structs for performance improvement.
The intention of this patch is to reduce the size of struct bnxt,
which had grown because of recent changes and was impacting performance.

Fixes: 02a95625fe9c ("net/bnxt: add flow stats in extended stats")

Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
---
 drivers/net/bnxt/bnxt.h        |  19 ++--
 drivers/net/bnxt/bnxt_ethdev.c | 153 +++++++++++++++++++++------------
 drivers/net/bnxt/bnxt_flow.c   |  14 +--
 drivers/net/bnxt/bnxt_hwrm.c   |   5 +-
 drivers/net/bnxt/bnxt_stats.c  |  14 +--
 5 files changed, 127 insertions(+), 78 deletions(-)

diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 570767253..b71435495 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -515,6 +515,16 @@ struct bnxt_mark_info {
 #define BNXT_FW_STATUS_SHUTDOWN		0x100000
 
 #define BNXT_HWRM_SHORT_REQ_LEN		sizeof(struct hwrm_short_input)
+
+struct bnxt_flow_stat_info {
+	uint16_t                max_fc;
+	uint16_t		flow_count;
+	struct bnxt_ctx_mem_buf_info rx_fc_in_tbl;
+	struct bnxt_ctx_mem_buf_info rx_fc_out_tbl;
+	struct bnxt_ctx_mem_buf_info tx_fc_in_tbl;
+	struct bnxt_ctx_mem_buf_info tx_fc_out_tbl;
+};
+
 struct bnxt {
 	void				*bar0;
 
@@ -549,6 +559,7 @@ struct bnxt {
 #define BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS		BIT(22)
 #define BNXT_FLAG_FC_THREAD			BIT(23)
 #define BNXT_FLAG_RX_VECTOR_PKT_MODE		BIT(24)
+#define BNXT_FLAG_FLOW_XSTATS_EN		BIT(25)
 #define BNXT_PF(bp)		(!((bp)->flags & BNXT_FLAG_VF))
 #define BNXT_VF(bp)		((bp)->flags & BNXT_FLAG_VF)
 #define BNXT_NPAR(bp)		((bp)->flags & BNXT_FLAG_NPAR_PF)
@@ -561,6 +572,7 @@ struct bnxt {
 #define BNXT_STINGRAY(bp)	((bp)->flags & BNXT_FLAG_STINGRAY)
 #define BNXT_HAS_NQ(bp)		BNXT_CHIP_THOR(bp)
 #define BNXT_HAS_RING_GRPS(bp)	(!BNXT_CHIP_THOR(bp))
+#define BNXT_FLOW_XSTATS_EN(bp)	((bp)->flags & BNXT_FLAG_FLOW_XSTATS_EN)
 
 	uint32_t		fw_cap;
 #define BNXT_FW_CAP_HOT_RESET		BIT(0)
@@ -709,12 +721,7 @@ struct bnxt {
 	struct tf		tfp;
 	struct bnxt_ulp_context	ulp_ctx;
 	uint8_t			truflow;
-	uint16_t                max_fc;
-	struct bnxt_ctx_mem_buf_info rx_fc_in_tbl;
-	struct bnxt_ctx_mem_buf_info rx_fc_out_tbl;
-	struct bnxt_ctx_mem_buf_info tx_fc_in_tbl;
-	struct bnxt_ctx_mem_buf_info tx_fc_out_tbl;
-	uint16_t		flow_count;
+	struct bnxt_flow_stat_info *flow_stat;
 	uint8_t			flow_xstat;
 };
 
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index fa1f84d44..90fb7f635 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -197,6 +197,12 @@ static void bnxt_free_leds_info(struct bnxt *bp)
 	bp->leds = NULL;
 }
 
+static void bnxt_free_flow_stats_info(struct bnxt *bp)
+{
+	rte_free(bp->flow_stat);
+	bp->flow_stat = NULL;
+}
+
 static void bnxt_free_cos_queues(struct bnxt *bp)
 {
 	rte_free(bp->rx_cos_queue);
@@ -205,6 +211,8 @@ static void bnxt_free_cos_queues(struct bnxt *bp)
 
 static void bnxt_free_mem(struct bnxt *bp, bool reconfig)
 {
+	bnxt_free_flow_stats_info(bp);
+
 	bnxt_free_filter_mem(bp);
 	bnxt_free_vnic_attributes(bp);
 	bnxt_free_vnic_mem(bp);
@@ -257,6 +265,16 @@ static int bnxt_alloc_cos_queues(struct bnxt *bp)
 	return 0;
 }
 
+static int bnxt_alloc_flow_stats_info(struct bnxt *bp)
+{
+	bp->flow_stat = rte_zmalloc("bnxt_flow_xstat",
+				    sizeof(struct bnxt_flow_stat_info), 0);
+	if (bp->flow_stat == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
 static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig)
 {
 	int rc;
@@ -289,6 +307,12 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool reconfig)
 	if (rc)
 		goto alloc_mem_err;
 
+	if (BNXT_FLOW_XSTATS_EN(bp)) {
+		rc = bnxt_alloc_flow_stats_info(bp);
+		if (rc)
+			goto alloc_mem_err;
+	}
+
 	return 0;
 
 alloc_mem_err:
@@ -390,68 +414,72 @@ static int bnxt_register_fc_ctx_mem(struct bnxt *bp)
 {
 	int rc = 0;
 
-	rc = bnxt_hwrm_ctx_rgtr(bp, bp->rx_fc_in_tbl.dma,
-				&bp->rx_fc_in_tbl.ctx_id);
+	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_in_tbl.dma,
+				&bp->flow_stat->rx_fc_in_tbl.ctx_id);
 	if (rc)
 		return rc;
 
 	PMD_DRV_LOG(DEBUG,
 		    "rx_fc_in_tbl.va = %p rx_fc_in_tbl.dma = %p"
 		    " rx_fc_in_tbl.ctx_id = %d\n",
-		    bp->rx_fc_in_tbl.va,
-		    (void *)((uintptr_t)bp->rx_fc_in_tbl.dma),
-		    bp->rx_fc_in_tbl.ctx_id);
+		    bp->flow_stat->rx_fc_in_tbl.va,
+		    (void *)((uintptr_t)bp->flow_stat->rx_fc_in_tbl.dma),
+		    bp->flow_stat->rx_fc_in_tbl.ctx_id);
 
-	rc = bnxt_hwrm_ctx_rgtr(bp, bp->rx_fc_out_tbl.dma,
-				&bp->rx_fc_out_tbl.ctx_id);
+	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->rx_fc_out_tbl.dma,
+				&bp->flow_stat->rx_fc_out_tbl.ctx_id);
 	if (rc)
 		return rc;
 
 	PMD_DRV_LOG(DEBUG,
 		    "rx_fc_out_tbl.va = %p rx_fc_out_tbl.dma = %p"
 		    " rx_fc_out_tbl.ctx_id = %d\n",
-		    bp->rx_fc_out_tbl.va,
-		    (void *)((uintptr_t)bp->rx_fc_out_tbl.dma),
-		    bp->rx_fc_out_tbl.ctx_id);
+		    bp->flow_stat->rx_fc_out_tbl.va,
+		    (void *)((uintptr_t)bp->flow_stat->rx_fc_out_tbl.dma),
+		    bp->flow_stat->rx_fc_out_tbl.ctx_id);
 
-	rc = bnxt_hwrm_ctx_rgtr(bp, bp->tx_fc_in_tbl.dma,
-				&bp->tx_fc_in_tbl.ctx_id);
+	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_in_tbl.dma,
+				&bp->flow_stat->tx_fc_in_tbl.ctx_id);
 	if (rc)
 		return rc;
 
 	PMD_DRV_LOG(DEBUG,
 		    "tx_fc_in_tbl.va = %p tx_fc_in_tbl.dma = %p"
 		    " tx_fc_in_tbl.ctx_id = %d\n",
-		    bp->tx_fc_in_tbl.va,
-		    (void *)((uintptr_t)bp->tx_fc_in_tbl.dma),
-		    bp->tx_fc_in_tbl.ctx_id);
+		    bp->flow_stat->tx_fc_in_tbl.va,
+		    (void *)((uintptr_t)bp->flow_stat->tx_fc_in_tbl.dma),
+		    bp->flow_stat->tx_fc_in_tbl.ctx_id);
 
-	rc = bnxt_hwrm_ctx_rgtr(bp, bp->tx_fc_out_tbl.dma,
-				&bp->tx_fc_out_tbl.ctx_id);
+	rc = bnxt_hwrm_ctx_rgtr(bp, bp->flow_stat->tx_fc_out_tbl.dma,
+				&bp->flow_stat->tx_fc_out_tbl.ctx_id);
 	if (rc)
 		return rc;
 
 	PMD_DRV_LOG(DEBUG,
 		    "tx_fc_out_tbl.va = %p tx_fc_out_tbl.dma = %p"
 		    " tx_fc_out_tbl.ctx_id = %d\n",
-		    bp->tx_fc_out_tbl.va,
-		    (void *)((uintptr_t)bp->tx_fc_out_tbl.dma),
-		    bp->tx_fc_out_tbl.ctx_id);
+		    bp->flow_stat->tx_fc_out_tbl.va,
+		    (void *)((uintptr_t)bp->flow_stat->tx_fc_out_tbl.dma),
+		    bp->flow_stat->tx_fc_out_tbl.ctx_id);
 
-	memset(bp->rx_fc_out_tbl.va, 0, bp->rx_fc_out_tbl.size);
+	memset(bp->flow_stat->rx_fc_out_tbl.va,
+	       0,
+	       bp->flow_stat->rx_fc_out_tbl.size);
 	rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
 				       CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
-				       bp->rx_fc_out_tbl.ctx_id,
-				       bp->max_fc,
+				       bp->flow_stat->rx_fc_out_tbl.ctx_id,
+				       bp->flow_stat->max_fc,
 				       true);
 	if (rc)
 		return rc;
 
-	memset(bp->tx_fc_out_tbl.va, 0, bp->tx_fc_out_tbl.size);
+	memset(bp->flow_stat->tx_fc_out_tbl.va,
+	       0,
+	       bp->flow_stat->tx_fc_out_tbl.size);
 	rc = bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
 				       CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
-				       bp->tx_fc_out_tbl.ctx_id,
-				       bp->max_fc,
+				       bp->flow_stat->tx_fc_out_tbl.ctx_id,
+				       bp->flow_stat->max_fc,
 				       true);
 
 	return rc;
@@ -482,33 +510,41 @@ static int bnxt_init_fc_ctx_mem(struct bnxt *bp)
 	uint16_t max_fc;
 	int rc = 0;
 
-	max_fc = bp->max_fc;
+	max_fc = bp->flow_stat->max_fc;
 
 	sprintf(type, "bnxt_rx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
 		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
 	/* 4 bytes for each counter-id */
-	rc = bnxt_alloc_ctx_mem_buf(type, max_fc * 4, &bp->rx_fc_in_tbl);
+	rc = bnxt_alloc_ctx_mem_buf(type,
+				    max_fc * 4,
+				    &bp->flow_stat->rx_fc_in_tbl);
 	if (rc)
 		return rc;
 
 	sprintf(type, "bnxt_rx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
 		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
 	/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
-	rc = bnxt_alloc_ctx_mem_buf(type, max_fc * 16, &bp->rx_fc_out_tbl);
+	rc = bnxt_alloc_ctx_mem_buf(type,
+				    max_fc * 16,
+				    &bp->flow_stat->rx_fc_out_tbl);
 	if (rc)
 		return rc;
 
 	sprintf(type, "bnxt_tx_fc_in_" PCI_PRI_FMT, pdev->addr.domain,
 		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
 	/* 4 bytes for each counter-id */
-	rc = bnxt_alloc_ctx_mem_buf(type, max_fc * 4, &bp->tx_fc_in_tbl);
+	rc = bnxt_alloc_ctx_mem_buf(type,
+				    max_fc * 4,
+				    &bp->flow_stat->tx_fc_in_tbl);
 	if (rc)
 		return rc;
 
 	sprintf(type, "bnxt_tx_fc_out_" PCI_PRI_FMT, pdev->addr.domain,
 		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
 	/* 16 bytes for each counter - 8 bytes pkt_count, 8 bytes byte_count */
-	rc = bnxt_alloc_ctx_mem_buf(type, max_fc * 16, &bp->tx_fc_out_tbl);
+	rc = bnxt_alloc_ctx_mem_buf(type,
+				    max_fc * 16,
+				    &bp->flow_stat->tx_fc_out_tbl);
 	if (rc)
 		return rc;
 
@@ -522,10 +558,11 @@ static int bnxt_init_ctx_mem(struct bnxt *bp)
 	int rc = 0;
 
 	if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS) ||
-	    !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)))
+	    !(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) ||
+	    !BNXT_FLOW_XSTATS_EN(bp))
 		return 0;
 
-	rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->max_fc);
+	rc = bnxt_hwrm_cfa_counter_qcaps(bp, &bp->flow_stat->max_fc);
 	if (rc)
 		return rc;
 
@@ -1244,6 +1281,9 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
 
 	bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
 	bp->rx_cosq_cnt = 0;
+	/* All filters are deleted on a port stop. */
+	if (BNXT_FLOW_XSTATS_EN(bp))
+		bp->flow_stat->flow_count = 0;
 }
 
 static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
@@ -5314,8 +5354,8 @@ bnxt_parse_devarg_flow_xstat(__rte_unused const char *key,
 		return -EINVAL;
 	}
 
-	bp->flow_xstat = flow_xstat;
-	if (bp->flow_xstat)
+	bp->flags |= BNXT_FLAG_FLOW_XSTATS_EN;
+	if (BNXT_FLOW_XSTATS_EN(bp))
 		PMD_DRV_LOG(INFO, "flow_xstat feature enabled.\n");
 
 	return 0;
@@ -5457,46 +5497,47 @@ static void bnxt_unregister_fc_ctx_mem(struct bnxt *bp)
 {
 	bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX,
 				  CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
-				  bp->rx_fc_out_tbl.ctx_id,
-				  bp->max_fc,
+				  bp->flow_stat->rx_fc_out_tbl.ctx_id,
+				  bp->flow_stat->max_fc,
 				  false);
 
 	bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_TX,
 				  CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC,
-				  bp->tx_fc_out_tbl.ctx_id,
-				  bp->max_fc,
+				  bp->flow_stat->tx_fc_out_tbl.ctx_id,
+				  bp->flow_stat->max_fc,
 				  false);
 
-	if (bp->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
-		bnxt_hwrm_ctx_unrgtr(bp, bp->rx_fc_in_tbl.ctx_id);
-	bp->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
+	if (bp->flow_stat->rx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
+		bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_in_tbl.ctx_id);
+	bp->flow_stat->rx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
 
-	if (bp->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
-		bnxt_hwrm_ctx_unrgtr(bp, bp->rx_fc_out_tbl.ctx_id);
-	bp->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
+	if (bp->flow_stat->rx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
+		bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->rx_fc_out_tbl.ctx_id);
+	bp->flow_stat->rx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
 
-	if (bp->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
-		bnxt_hwrm_ctx_unrgtr(bp, bp->tx_fc_in_tbl.ctx_id);
-	bp->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
+	if (bp->flow_stat->tx_fc_in_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
+		bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_in_tbl.ctx_id);
+	bp->flow_stat->tx_fc_in_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
 
-	if (bp->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
-		bnxt_hwrm_ctx_unrgtr(bp, bp->tx_fc_out_tbl.ctx_id);
-	bp->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
+	if (bp->flow_stat->tx_fc_out_tbl.ctx_id != BNXT_CTX_VAL_INVAL)
+		bnxt_hwrm_ctx_unrgtr(bp, bp->flow_stat->tx_fc_out_tbl.ctx_id);
+	bp->flow_stat->tx_fc_out_tbl.ctx_id = BNXT_CTX_VAL_INVAL;
 }
 
 static void bnxt_uninit_fc_ctx_mem(struct bnxt *bp)
 {
 	bnxt_unregister_fc_ctx_mem(bp);
 
-	bnxt_free_ctx_mem_buf(&bp->rx_fc_in_tbl);
-	bnxt_free_ctx_mem_buf(&bp->rx_fc_out_tbl);
-	bnxt_free_ctx_mem_buf(&bp->tx_fc_in_tbl);
-	bnxt_free_ctx_mem_buf(&bp->tx_fc_out_tbl);
+	bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_in_tbl);
+	bnxt_free_ctx_mem_buf(&bp->flow_stat->rx_fc_out_tbl);
+	bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_in_tbl);
+	bnxt_free_ctx_mem_buf(&bp->flow_stat->tx_fc_out_tbl);
 }
 
 static void bnxt_uninit_ctx_mem(struct bnxt *bp)
 {
-	bnxt_uninit_fc_ctx_mem(bp);
+	if (BNXT_FLOW_XSTATS_EN(bp))
+		bnxt_uninit_fc_ctx_mem(bp);
 }
 
 static void
diff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c
index 44734272f..84a21dba9 100644
--- a/drivers/net/bnxt/bnxt_flow.c
+++ b/drivers/net/bnxt/bnxt_flow.c
@@ -1633,7 +1633,7 @@ static void
 bnxt_setup_flow_counter(struct bnxt *bp)
 {
 	if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS &&
-	    !(bp->flags & BNXT_FLAG_FC_THREAD)) {
+	    !(bp->flags & BNXT_FLAG_FC_THREAD) && BNXT_FLOW_XSTATS_EN(bp)) {
 		rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER,
 				  bnxt_flow_cnt_alarm_cb,
 				  (void *)bp);
@@ -1646,13 +1646,13 @@ void bnxt_flow_cnt_alarm_cb(void *arg)
 	int rc = 0;
 	struct bnxt *bp = arg;
 
-	if (!bp->rx_fc_out_tbl.va) {
-		PMD_DRV_LOG(ERR, "bp->rx_fc_out_tbl.va is NULL?\n");
+	if (!bp->flow_stat->rx_fc_out_tbl.va) {
+		PMD_DRV_LOG(ERR, "bp->flow_stat->rx_fc_out_tbl.va is NULL?\n");
 		bnxt_cancel_fc_thread(bp);
 		return;
 	}
 
-	if (!bp->flow_count) {
+	if (!bp->flow_stat->flow_count) {
 		bnxt_cancel_fc_thread(bp);
 		return;
 	}
@@ -1830,7 +1830,8 @@ bnxt_flow_create(struct rte_eth_dev *dev,
 			bp->mark_table[flow_id].valid = true;
 			bp->mark_table[flow_id].mark_id = filter->mark;
 		}
-		bp->flow_count++;
+		if (BNXT_FLOW_XSTATS_EN(bp))
+			bp->flow_stat->flow_count++;
 		bnxt_release_flow_lock(bp);
 		bnxt_setup_flow_counter(bp);
 		return flow;
@@ -1952,7 +1953,8 @@ _bnxt_flow_destroy(struct bnxt *bp,
 		bnxt_free_filter(bp, filter);
 		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
 		rte_free(flow);
-		bp->flow_count--;
+		if (BNXT_FLOW_XSTATS_EN(bp))
+			bp->flow_stat->flow_count--;
 
 		/* If this was the last flow associated with this vnic,
 		 * switch the queue back to RSS pool.
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 148000934..4022fafd1 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -5274,7 +5274,6 @@ int bnxt_hwrm_cfa_counter_qcaps(struct bnxt *bp, uint16_t *max_fc)
 		*max_fc = rte_le_to_cpu_16(resp->max_rx_fc);
 	HWRM_UNLOCK();
 
-	PMD_DRV_LOG(DEBUG, "max_fc = %d\n", *max_fc);
 	return 0;
 }
 
@@ -5387,10 +5386,10 @@ int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp,
 	}
 
 	if (dir == BNXT_DIR_RX) {
-		flow_ctx_id = bp->rx_fc_in_tbl.ctx_id;
+		flow_ctx_id = bp->flow_stat->rx_fc_in_tbl.ctx_id;
 		flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX;
 	} else if (dir == BNXT_DIR_TX) {
-		flow_ctx_id = bp->tx_fc_in_tbl.ctx_id;
+		flow_ctx_id = bp->flow_stat->tx_fc_in_tbl.ctx_id;
 		flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_TX;
 	}
 
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index 1d3be16f8..cfe193284 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -669,7 +669,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
 
 	if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS &&
 	    bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT &&
-	    bp->flow_xstat) {
+	    BNXT_FLOW_XSTATS_EN(bp)) {
 		int j;
 
 		i = 0;
@@ -713,7 +713,7 @@ int bnxt_flow_stats_cnt(struct bnxt *bp)
 {
 	if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS &&
 	    bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT &&
-	    bp->flow_xstat) {
+	    BNXT_FLOW_XSTATS_EN(bp)) {
 		struct bnxt_xstats_name_off flow_bytes[bp->max_l2_ctx];
 		struct bnxt_xstats_name_off flow_pkts[bp->max_l2_ctx];
 
@@ -783,7 +783,7 @@ int bnxt_dev_xstats_get_names_op(struct rte_eth_dev *eth_dev,
 
 		if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS &&
 		    bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT &&
-		    bp->flow_xstat) {
+		    BNXT_FLOW_XSTATS_EN(bp)) {
 			for (i = 0; i < bp->max_l2_ctx; i++) {
 				char buf[RTE_ETH_XSTATS_NAME_SIZE];
 
@@ -936,8 +936,8 @@ static int bnxt_update_fc_tbl(struct bnxt *bp, uint16_t ctr,
 	uint32_t out_rx_tbl_cnt = 0;
 	int i, rc = 0;
 
-	in_rx_tbl = (uint32_t *)bp->rx_fc_in_tbl.va;
-	out_rx_tbl = (uint64_t *)bp->rx_fc_out_tbl.va;
+	in_rx_tbl = (uint32_t *)bp->flow_stat->rx_fc_in_tbl.va;
+	out_rx_tbl = (uint64_t *)bp->flow_stat->rx_fc_out_tbl.va;
 
 	for (i = 0; i < in_flow_cnt; i++) {
 		if (!en_tbl[i])
@@ -979,7 +979,7 @@ int bnxt_flow_stats_req(struct bnxt *bp)
 	struct rte_flow *flow;
 	uint16_t in_flow_tbl_cnt = 0;
 	struct bnxt_vnic_info *vnic = NULL;
-	struct bnxt_filter_info *valid_en_tbl[bp->max_fc];
+	struct bnxt_filter_info *valid_en_tbl[bp->flow_stat->max_fc];
 	uint16_t counter_type = CFA_COUNTER_CFG_IN_COUNTER_TYPE_FC;
 
 	bnxt_acquire_flow_lock(bp);
@@ -996,7 +996,7 @@ int bnxt_flow_stats_req(struct bnxt *bp)
 				continue;
 
 			valid_en_tbl[in_flow_tbl_cnt++] = flow->filter;
-			if (in_flow_tbl_cnt >= bp->max_fc) {
+			if (in_flow_tbl_cnt >= bp->flow_stat->max_fc) {
 				rc = bnxt_update_fc_tbl(bp, counter_type,
 							valid_en_tbl,
 							in_flow_tbl_cnt);
-- 
2.21.1 (Apple Git-122.3)


  parent reply	other threads:[~2020-05-15 18:46 UTC|newest]

Thread overview: 11+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-05-15 18:45 [dpdk-dev] [PATCH v1 0/9] bug fixes for bnxt PMD Ajit Khaparde
2020-05-15 18:45 ` [dpdk-dev] [PATCH v1 1/9] net/bnxt: fix error log for command timeout Ajit Khaparde
2020-05-15 18:45 ` [dpdk-dev] [PATCH v1 2/9] net/bnxt: fix to alloc LED config info Ajit Khaparde
2020-05-15 18:45 ` [dpdk-dev] [PATCH v1 3/9] net/bnxt: fix to alloc COS queue info dynamically Ajit Khaparde
2020-05-15 18:45 ` Ajit Khaparde [this message]
2020-05-15 18:45 ` [dpdk-dev] [PATCH v1 5/9] net/bnxt: fix to alloc link info struct Ajit Khaparde
2020-05-15 18:45 ` [dpdk-dev] [PATCH v1 6/9] net/bnxt: fix to alloc PF info structure Ajit Khaparde
2020-05-15 18:45 ` [dpdk-dev] [PATCH v1 7/9] net/bnxt: fix to use RSS config from eth dev struct Ajit Khaparde
2020-05-15 18:45 ` [dpdk-dev] [PATCH v1 8/9] net/bnxt: fix to remove unneeded structure variable Ajit Khaparde
2020-05-15 18:45 ` [dpdk-dev] [PATCH v1 9/9] net/bnxt: fix to allocate bnxt ulp context Ajit Khaparde
2020-05-16 15:12 ` [dpdk-dev] [PATCH v1 0/9] bug fixes for bnxt PMD Ajit Khaparde

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20200515184542.89318-5-ajit.khaparde@broadcom.com \
    --to=ajit.khaparde@broadcom.com \
    --cc=dev@dpdk.org \
    --cc=ferruh.yigit@intel.com \
    --cc=kalesh-anakkur.purayil@broadcom.com \
    --cc=somnath.kotur@broadcom.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

DPDK patches and discussions

This inbox may be cloned and mirrored by anyone:

	git clone --mirror https://inbox.dpdk.org/dev/0 dev/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 dev dev/ https://inbox.dpdk.org/dev \
		dev@dpdk.org
	public-inbox-index dev

Example config snippet for mirrors.
Newsgroup available over NNTP:
	nntp://inbox.dpdk.org/inbox.dpdk.dev


AGPL code for this site: git clone https://public-inbox.org/public-inbox.git