patches for DPDK stable branches
* [dpdk-stable] [PATCH 01/13] net/bnxt: fix the corruption of the session details
       [not found] <20201009111130.10422-1-somnath.kotur@broadcom.com>
@ 2020-10-09 11:11 ` Somnath Kotur
  2020-10-09 11:11 ` [dpdk-stable] [PATCH 04/13] net/bnxt: fixes for PMD PF support in SR-IOV mode Somnath Kotur
                   ` (6 subsequent siblings)
  7 siblings, 0 replies; 10+ messages in thread
From: Somnath Kotur @ 2020-10-09 11:11 UTC (permalink / raw)
  To: dev
  Cc: ferruh.yigit, Kishore Padmanabha, stable, Michael Baucom,
	Ajit Kumar Khaparde

From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>

The session details that are shared among multiple ports
need to be stored outside the bnxt structure.

Fixes: 70e64b27af5b ("net/bnxt: support ULP session manager cleanup")
Cc: stable@dpdk.org

Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Michael Baucom <michael.baucom@broadcom.com>
Reviewed-by: Ajit Kumar Khaparde <ajit.khaparde@broadcom.com>
---
 drivers/net/bnxt/tf_ulp/bnxt_ulp.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
index 2896194..a4d48c7 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
@@ -159,7 +159,9 @@ ulp_ctx_session_open(struct bnxt *bp,
 	}
 	if (!session->session_opened) {
 		session->session_opened = 1;
-		session->g_tfp = &bp->tfp;
+		session->g_tfp = rte_zmalloc("bnxt_ulp_session_tfp",
+					     sizeof(struct tf), 0);
+		session->g_tfp->session = bp->tfp.session;
 	}
 	return rc;
 }
@@ -176,6 +178,7 @@ ulp_ctx_session_close(struct bnxt *bp,
 	if (session->session_opened)
 		tf_close_session(&bp->tfp);
 	session->session_opened = 0;
+	rte_free(session->g_tfp);
 	session->g_tfp = NULL;
 }
 
-- 
2.7.4
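
For context on the fix above: session->g_tfp previously aliased
&bp->tfp, a field embedded in a single port's bnxt structure, so
tearing down that port left every other port sharing the session with
a dangling pointer. A minimal sketch of the two ownership models,
reusing the names from the diff (the NULL check is an editorial
addition, not part of the patch):

	/* Before: the session borrows storage owned by one port and
	 * is corrupted as soon as that port's bnxt goes away.
	 */
	session->g_tfp = &bp->tfp;

	/* After: the session owns an independent struct tf and only
	 * copies the shared session handle out of the port.
	 */
	session->g_tfp = rte_zmalloc("bnxt_ulp_session_tfp",
				     sizeof(struct tf), 0);
	if (session->g_tfp != NULL)	/* editorial addition */
		session->g_tfp->session = bp->tfp.session;

The matching rte_free() added to ulp_ctx_session_close() releases the
session-owned copy before the pointer is cleared.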


* [dpdk-stable] [PATCH 04/13] net/bnxt: fixes for PMD PF support in SR-IOV mode
       [not found] <20201009111130.10422-1-somnath.kotur@broadcom.com>
  2020-10-09 11:11 ` [dpdk-stable] [PATCH 01/13] net/bnxt: fix the corruption of the session details Somnath Kotur
@ 2020-10-09 11:11 ` Somnath Kotur
  2020-10-09 11:11 ` [dpdk-stable] [PATCH 07/13] net/bnxt: register PF for default vnic change async event Somnath Kotur
                   ` (5 subsequent siblings)
  7 siblings, 0 replies; 10+ messages in thread
From: Somnath Kotur @ 2020-10-09 11:11 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit, Venkat Duvvuru, stable, Somnath Kotur

From: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>

1. Implement HWRM_FUNC_VF_RESOURCE_CFG command and use it to
   reserve resources for VFs when NEW RM is enabled.
2. Invoke PF’s FUNC_CFG before configuring VFs resources.
3. Don’t consider max_rx_em_flows in max_l2_ctx calculation
   when VFs are configured.
4. Issue HWRM_FUNC_QCFG instead of HWRM_FUNC_QCAPS to find
   out the actual allocated resources for the VF.
5. Don’t add a random MAC address to the VF.
6. Handle completion type CMPL_BASE_TYPE_HWRM_FWD_REQ instead
   of CMPL_BASE_TYPE_HWRM_FWD_RESP.
7. Don't enable HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE
   when the list of HWRM commands that need to be forwarded
   to the PF is specified in HWRM_FUNC_DRV_RGTR.
8. Update the list of HWRM commands that can be forwarded to
   the PF (see the sketch after this list).
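
The forwarding list referenced in items 7 and 8 is a plain bitmap:
BNXT_HWRM_CMD_TO_FORWARD(), added to bnxt.h in this patch, sets bit
(cmd % 32) in 32-bit word (cmd / 32) of vf_req_fwd[]. A worked
example with an illustrative command id (0x90 is just a number here,
not a real HWRM opcode):

	/* cmd = 0x90 = 144  ->  word 144 / 32 = 4, bit 144 % 32 = 16 */
	bp->pf->vf_req_fwd[cmd / 32] |= (1 << (cmd % 32));
	/* i.e. vf_req_fwd[4] |= 0x00010000; firmware then forwards
	 * matching VF requests to the PF driver instead of executing
	 * them itself.
	 */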

Fixes: b7778e8a1c00 ("net/bnxt: refactor to properly allocate resources for PF/VF")
Cc: stable@dpdk.org

Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
---
 drivers/net/bnxt/bnxt.h        |   6 +-
 drivers/net/bnxt/bnxt_cpr.c    |   6 +-
 drivers/net/bnxt/bnxt_ethdev.c |  40 +---
 drivers/net/bnxt/bnxt_hwrm.c   | 458 +++++++++++++++++++++++++----------------
 drivers/net/bnxt/bnxt_hwrm.h   |  12 +-
 5 files changed, 306 insertions(+), 216 deletions(-)

diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index eca7448..a951bca 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -167,6 +167,9 @@
 #define	BNXT_DEFAULT_VNIC_CHANGE_VF_ID_SFT		\
 	HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT
 
+#define BNXT_HWRM_CMD_TO_FORWARD(cmd)	\
+		(bp->pf->vf_req_fwd[(cmd) / 32] |= (1 << ((cmd) % 32)))
+
 struct bnxt_led_info {
 	uint8_t	     num_leds;
 	uint8_t      led_id;
@@ -664,9 +667,10 @@ struct bnxt {
 #define BNXT_FW_CAP_IF_CHANGE		BIT(1)
 #define BNXT_FW_CAP_ERROR_RECOVERY	BIT(2)
 #define BNXT_FW_CAP_ERR_RECOVER_RELOAD	BIT(3)
+#define BNXT_FW_CAP_HCOMM_FW_STATUS	BIT(4)
 #define BNXT_FW_CAP_ADV_FLOW_MGMT	BIT(5)
 #define BNXT_FW_CAP_ADV_FLOW_COUNTERS	BIT(6)
-#define BNXT_FW_CAP_HCOMM_FW_STATUS	BIT(7)
+#define BNXT_FW_CAP_LINK_ADMIN		BIT(7)
 
 	pthread_mutex_t         flow_lock;
 
diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index a3a7e6a..5492394 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -239,7 +239,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
 		goto reject;
 	}
 
-	if (bnxt_rcv_msg_from_vf(bp, vf_id, fwd_cmd) == true) {
+	if (bnxt_rcv_msg_from_vf(bp, vf_id, fwd_cmd)) {
 		/*
 		 * In older firmware versions, the MAC had to be all zeros for
 		 * the VF to set it's MAC via hwrm_func_vf_cfg. Set to all
@@ -254,6 +254,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
 				(const uint8_t *)"\x00\x00\x00\x00\x00");
 			}
 		}
+
 		if (fwd_cmd->req_type == HWRM_CFA_L2_SET_RX_MASK) {
 			struct hwrm_cfa_l2_set_rx_mask_input *srm =
 							(void *)fwd_cmd;
@@ -265,6 +266,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
 			    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN |
 			    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN);
 		}
+
 		/* Forward */
 		rc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
 		if (rc) {
@@ -306,7 +308,7 @@ int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp)
 		bnxt_handle_async_event(bp, cmp);
 		evt = 1;
 		break;
-	case CMPL_BASE_TYPE_HWRM_FWD_RESP:
+	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
 		/* Handle HWRM forwarded responses */
 		bnxt_handle_fwd_req(bp, cmp);
 		evt = 1;
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 8b63134..b4654ec 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -5208,37 +5208,14 @@ static void bnxt_config_vf_req_fwd(struct bnxt *bp)
 	if (!BNXT_PF(bp))
 		return;
 
-#define ALLOW_FUNC(x)	\
-	{ \
-		uint32_t arg = (x); \
-		bp->pf->vf_req_fwd[((arg) >> 5)] &= \
-		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
-	}
-
-	/* Forward all requests if firmware is new enough */
-	if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
-	     (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
-	    ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
-		memset(bp->pf->vf_req_fwd, 0xff, sizeof(bp->pf->vf_req_fwd));
-	} else {
-		PMD_DRV_LOG(WARNING,
-			    "Firmware too old for VF mailbox functionality\n");
-		memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));
-	}
+	memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));
 
-	/*
-	 * The following are used for driver cleanup. If we disallow these,
-	 * VF drivers can't clean up cleanly.
-	 */
-	ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
-	ALLOW_FUNC(HWRM_VNIC_FREE);
-	ALLOW_FUNC(HWRM_RING_FREE);
-	ALLOW_FUNC(HWRM_RING_GRP_FREE);
-	ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
-	ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
-	ALLOW_FUNC(HWRM_STAT_CTX_FREE);
-	ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
-	ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
+	if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN))
+		BNXT_HWRM_CMD_TO_FORWARD(HWRM_PORT_PHY_QCFG);
+	BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_CFG);
+	BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_VF_CFG);
+	BNXT_HWRM_CMD_TO_FORWARD(HWRM_CFA_L2_FILTER_ALLOC);
+	BNXT_HWRM_CMD_TO_FORWARD(HWRM_OEM_CMD);
 }
 
 uint16_t
@@ -6189,7 +6166,10 @@ bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
 
 	bnxt_free_int(bp);
 	bnxt_free_mem(bp, reconfig_dev);
+
 	bnxt_hwrm_func_buf_unrgtr(bp);
+	rte_free(bp->pf->vf_req_buf);
+
 	rc = bnxt_hwrm_func_driver_unregister(bp, 0);
 	bp->flags &= ~BNXT_FLAG_REGISTERED;
 	bnxt_free_ctx_mem(bp);
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index faeaf4b..e49f56c 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -765,7 +765,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
 	bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
 	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
-	if (!BNXT_CHIP_THOR(bp))
+	if (!BNXT_CHIP_THOR(bp) && !bp->pdev->max_vfs)
 		bp->max_l2_ctx += bp->max_rx_em_flows;
 	/* TODO: For now, do not support VMDq/RFS on VFs. */
 	if (BNXT_PF(bp)) {
@@ -803,6 +803,9 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
 		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
 
+	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
+		bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
+
 	HWRM_UNLOCK();
 
 	return rc;
@@ -818,16 +821,15 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 		if (rc)
 			return rc;
 
+		/* On older FW,
+		 * bnxt_hwrm_func_resc_qcaps can fail and cause init failure.
+		 * But the error can be ignored. Return success.
+		 */
 		rc = bnxt_hwrm_func_resc_qcaps(bp);
 		if (!rc)
 			bp->flags |= BNXT_FLAG_NEW_RM;
 	}
 
-	/* On older FW,
-	 * bnxt_hwrm_func_resc_qcaps can fail and cause init failure.
-	 * But the error can be ignored. Return success.
-	 */
-
 	return 0;
 }
 
@@ -916,14 +918,6 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 		memcpy(req.vf_req_fwd, bp->pf->vf_req_fwd,
 		       RTE_MIN(sizeof(req.vf_req_fwd),
 			       sizeof(bp->pf->vf_req_fwd)));
-
-		/*
-		 * PF can sniff HWRM API issued by VF. This can be set up by
-		 * linux driver and inherited by the DPDK PF driver. Clear
-		 * this HWRM sniffer list in FW because DPDK PF driver does
-		 * not support this.
-		 */
-		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE;
 	}
 
 	req.flags = rte_cpu_to_le_32(flags);
@@ -1052,21 +1046,19 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
 
 	HWRM_CHECK_RESULT_SILENT();
 
-	if (BNXT_VF(bp)) {
-		bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
-		bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
-		bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
-		bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
-		bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
-		/* func_resource_qcaps does not return max_rx_em_flows.
-		 * So use the value provided by func_qcaps.
-		 */
-		bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
-		if (!BNXT_CHIP_THOR(bp))
-			bp->max_l2_ctx += bp->max_rx_em_flows;
-		bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
-		bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
-	}
+	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
+	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
+	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
+	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
+	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
+	/* func_resource_qcaps does not return max_rx_em_flows.
+	 * So use the value provided by func_qcaps.
+	 */
+	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+	if (!BNXT_CHIP_THOR(bp) && !bp->pdev->max_vfs)
+		bp->max_l2_ctx += bp->max_rx_em_flows;
+	bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
+	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
 	bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
 	bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
 	if (bp->vf_resv_strategy >
@@ -3300,33 +3292,8 @@ int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp)
 	return 0;
 }
 
-static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
-				   struct hwrm_func_qcaps_output *qcaps)
-{
-	qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
-	memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
-	       sizeof(qcaps->mac_address));
-	qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
-	qcaps->max_rx_rings = fcfg->num_rx_rings;
-	qcaps->max_tx_rings = fcfg->num_tx_rings;
-	qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
-	qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
-	qcaps->max_vfs = 0;
-	qcaps->first_vf_id = 0;
-	qcaps->max_vnics = fcfg->num_vnics;
-	qcaps->max_decap_records = 0;
-	qcaps->max_encap_records = 0;
-	qcaps->max_tx_wm_flows = 0;
-	qcaps->max_tx_em_flows = 0;
-	qcaps->max_rx_wm_flows = 0;
-	qcaps->max_rx_em_flows = 0;
-	qcaps->max_flow_id = 0;
-	qcaps->max_mcast_filters = fcfg->num_mcast_filters;
-	qcaps->max_sp_tx_rings = 0;
-	qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
-}
-
-static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
+static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp,
+				 struct bnxt_pf_resource_info *pf_resc)
 {
 	struct hwrm_func_cfg_input req = {0};
 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
@@ -3345,7 +3312,8 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
 
 	if (BNXT_HAS_RING_GRPS(bp)) {
 		enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
-		req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
+		req.num_hw_ring_grps =
+			rte_cpu_to_le_16(pf_resc->num_hw_ring_grps);
 	} else if (BNXT_HAS_NQ(bp)) {
 		enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
 		req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
@@ -3354,12 +3322,12 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
 	req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
 	req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
 	req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
-	req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
-	req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
-	req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
-	req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
-	req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
-	req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
+	req.num_rsscos_ctxs = rte_cpu_to_le_16(pf_resc->num_rsscos_ctxs);
+	req.num_stat_ctxs = rte_cpu_to_le_16(pf_resc->num_stat_ctxs);
+	req.num_cmpl_rings = rte_cpu_to_le_16(pf_resc->num_cp_rings);
+	req.num_tx_rings = rte_cpu_to_le_16(pf_resc->num_tx_rings);
+	req.num_rx_rings = rte_cpu_to_le_16(pf_resc->num_rx_rings);
+	req.num_l2_ctxs = rte_cpu_to_le_16(pf_resc->num_l2_ctxs);
 	req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
 	req.fid = rte_cpu_to_le_16(0xffff);
 	req.enables = rte_cpu_to_le_32(enables);
@@ -3374,9 +3342,40 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
 	return rc;
 }
 
-static void populate_vf_func_cfg_req(struct bnxt *bp,
-				     struct hwrm_func_cfg_input *req,
-				     int num_vfs)
+/* min values are the guaranteed resources and max values are subject
+ * to availability. The strategy for now is to keep both min & max
+ * values the same.
+ */
+static void
+bnxt_fill_vf_func_cfg_req_new(struct bnxt *bp,
+			      struct hwrm_func_vf_resource_cfg_input *req,
+			      int num_vfs)
+{
+	req->min_rsscos_ctx = req->max_rsscos_ctx =
+			rte_cpu_to_le_16(bp->max_rsscos_ctx /
+					 (num_vfs + 1));
+	req->min_stat_ctx = req->max_stat_ctx =
+			rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
+	req->min_cmpl_rings = req->max_cmpl_rings =
+			rte_cpu_to_le_16(bp->max_cp_rings / (num_vfs + 1));
+	req->min_tx_rings = req->max_tx_rings =
+			rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
+	req->min_rx_rings = req->max_rx_rings =
+			rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
+	req->min_l2_ctxs = req->max_l2_ctxs =
+			rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
+	/* TODO: For now, do not support VMDq/RFS on VFs. */
+	req->min_vnics = req->max_vnics = rte_cpu_to_le_16(1);
+	req->min_hw_ring_grps = req->max_hw_ring_grps =
+			rte_cpu_to_le_16(bp->max_ring_grps / (num_vfs + 1));
+	req->flags =
+	 rte_cpu_to_le_16(HWRM_FUNC_VF_RESOURCE_CFG_INPUT_FLAGS_MIN_GUARANTEED);
+}
+
+static void
+bnxt_fill_vf_func_cfg_req_old(struct bnxt *bp,
+			      struct hwrm_func_cfg_input *req,
+			      int num_vfs)
 {
 	req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
 			HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
@@ -3407,60 +3406,29 @@ static void populate_vf_func_cfg_req(struct bnxt *bp,
 						 (num_vfs + 1));
 }
 
-static void add_random_mac_if_needed(struct bnxt *bp,
-				     struct hwrm_func_cfg_input *cfg_req,
-				     int vf)
-{
-	struct rte_ether_addr mac;
-
-	if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
-		return;
-
-	if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00", 6) == 0) {
-		cfg_req->enables |=
-		rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
-		rte_eth_random_addr(cfg_req->dflt_mac_addr);
-		bp->pf->vf_info[vf].random_mac = true;
-	} else {
-		memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,
-			RTE_ETHER_ADDR_LEN);
-	}
-}
-
-static int reserve_resources_from_vf(struct bnxt *bp,
-				     struct hwrm_func_cfg_input *cfg_req,
+/* Update the port wide resource values based on how many resources
+ * got allocated to the VF.
+ */
+static int bnxt_update_max_resources(struct bnxt *bp,
 				     int vf)
 {
-	struct hwrm_func_qcaps_input req = {0};
-	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_qcfg_input req = {0};
+	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 	int rc;
 
 	/* Get the actual allocated values now */
-	HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+	HWRM_CHECK_RESULT();
 
-	if (rc) {
-		PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
-		copy_func_cfg_to_qcaps(cfg_req, resp);
-	} else if (resp->error_code) {
-		rc = rte_le_to_cpu_16(resp->error_code);
-		PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
-		copy_func_cfg_to_qcaps(cfg_req, resp);
-	}
-
-	bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
-	bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
-	bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
-	bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
-	bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
-	bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
-	/*
-	 * TODO: While not supporting VMDq with VFs, max_vnics is always
-	 * forced to 1 in this case
-	 */
-	//bp->max_vnics -= rte_le_to_cpu_16(esp->max_vnics);
-	bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
+	bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
+	bp->max_stat_ctx -= rte_le_to_cpu_16(resp->alloc_stat_ctx);
+	bp->max_cp_rings -= rte_le_to_cpu_16(resp->alloc_cmpl_rings);
+	bp->max_tx_rings -= rte_le_to_cpu_16(resp->alloc_tx_rings);
+	bp->max_rx_rings -= rte_le_to_cpu_16(resp->alloc_rx_rings);
+	bp->max_l2_ctx -= rte_le_to_cpu_16(resp->alloc_l2_ctx);
+	bp->max_ring_grps -= rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
 
 	HWRM_UNLOCK();
 
@@ -3485,7 +3453,8 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
 	return rc;
 }
 
-static int update_pf_resource_max(struct bnxt *bp)
+static int bnxt_query_pf_resources(struct bnxt *bp,
+				   struct bnxt_pf_resource_info *pf_resc)
 {
 	struct hwrm_func_qcfg_input req = {0};
 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
@@ -3497,8 +3466,13 @@ static int update_pf_resource_max(struct bnxt *bp)
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 	HWRM_CHECK_RESULT();
 
-	/* Only TX ring value reflects actual allocation? TODO */
-	bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
+	pf_resc->num_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
+	pf_resc->num_rsscos_ctxs = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
+	pf_resc->num_stat_ctxs = rte_le_to_cpu_16(resp->alloc_stat_ctx);
+	pf_resc->num_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
+	pf_resc->num_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
+	pf_resc->num_l2_ctxs = rte_le_to_cpu_16(resp->alloc_l2_ctx);
+	pf_resc->num_hw_ring_grps = rte_le_to_cpu_32(resp->alloc_hw_ring_grps);
 	bp->pf->evb_mode = resp->evb_mode;
 
 	HWRM_UNLOCK();
@@ -3506,8 +3480,42 @@ static int update_pf_resource_max(struct bnxt *bp)
 	return rc;
 }
 
+static void
+bnxt_calculate_pf_resources(struct bnxt *bp,
+			    struct bnxt_pf_resource_info *pf_resc,
+			    int num_vfs)
+{
+	if (!num_vfs) {
+		pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx;
+		pf_resc->num_stat_ctxs = bp->max_stat_ctx;
+		pf_resc->num_cp_rings = bp->max_cp_rings;
+		pf_resc->num_tx_rings = bp->max_tx_rings;
+		pf_resc->num_rx_rings = bp->max_rx_rings;
+		pf_resc->num_l2_ctxs = bp->max_l2_ctx;
+		pf_resc->num_hw_ring_grps = bp->max_ring_grps;
+
+		return;
+	}
+
+	pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx / (num_vfs + 1) +
+				   bp->max_rsscos_ctx % (num_vfs + 1);
+	pf_resc->num_stat_ctxs = bp->max_stat_ctx / (num_vfs + 1) +
+				 bp->max_stat_ctx % (num_vfs + 1);
+	pf_resc->num_cp_rings = bp->max_cp_rings / (num_vfs + 1) +
+				bp->max_cp_rings % (num_vfs + 1);
+	pf_resc->num_tx_rings = bp->max_tx_rings / (num_vfs + 1) +
+				bp->max_tx_rings % (num_vfs + 1);
+	pf_resc->num_rx_rings = bp->max_rx_rings / (num_vfs + 1) +
+				bp->max_rx_rings % (num_vfs + 1);
+	pf_resc->num_l2_ctxs = bp->max_l2_ctx / (num_vfs + 1) +
+			       bp->max_l2_ctx % (num_vfs + 1);
+	pf_resc->num_hw_ring_grps = bp->max_ring_grps / (num_vfs + 1) +
+				    bp->max_ring_grps % (num_vfs + 1);
+}
+
 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
 {
+	struct bnxt_pf_resource_info pf_resc = { 0 };
 	int rc;
 
 	if (!BNXT_PF(bp)) {
@@ -3519,82 +3527,100 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
 	if (rc)
 		return rc;
 
+	bnxt_calculate_pf_resources(bp, &pf_resc, 0);
+
 	bp->pf->func_cfg_flags &=
 		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
 		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
 	bp->pf->func_cfg_flags |=
 		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
-	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
+	rc = bnxt_hwrm_pf_func_cfg(bp, &pf_resc);
 	rc = __bnxt_hwrm_func_qcaps(bp);
 	return rc;
 }
 
-int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
+static int
+bnxt_configure_vf_req_buf(struct bnxt *bp, int num_vfs)
 {
-	struct hwrm_func_cfg_input req = {0};
-	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
-	int i;
-	size_t sz;
-	int rc = 0;
-	size_t req_buf_sz;
-
-	if (!BNXT_PF(bp)) {
-		PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
-		return -EINVAL;
-	}
-
-	rc = bnxt_hwrm_func_qcaps(bp);
-
-	if (rc)
-		return rc;
-
-	bp->pf->active_vfs = num_vfs;
-
-	/*
-	 * First, configure the PF to only use one TX ring.  This ensures that
-	 * there are enough rings for all VFs.
-	 *
-	 * If we don't do this, when we call func_alloc() later, we will lock
-	 * extra rings to the PF that won't be available during func_cfg() of
-	 * the VFs.
-	 *
-	 * This has been fixed with firmware versions above 20.6.54
-	 */
-	bp->pf->func_cfg_flags &=
-		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
-		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
-	bp->pf->func_cfg_flags |=
-		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
-	rc = bnxt_hwrm_pf_func_cfg(bp, 1);
-	if (rc)
-		return rc;
+	size_t req_buf_sz, sz;
+	int i, rc;
 
-	/*
-	 * Now, create and register a buffer to hold forwarded VF requests
-	 */
 	req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
 	bp->pf->vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
 		page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
 	if (bp->pf->vf_req_buf == NULL) {
-		rc = -ENOMEM;
-		goto error_free;
+		return -ENOMEM;
 	}
+
 	for (sz = 0; sz < req_buf_sz; sz += getpagesize())
 		rte_mem_lock_page(((char *)bp->pf->vf_req_buf) + sz);
+
 	for (i = 0; i < num_vfs; i++)
 		bp->pf->vf_info[i].req_buf = ((char *)bp->pf->vf_req_buf) +
-					(i * HWRM_MAX_REQ_LEN);
+					     (i * HWRM_MAX_REQ_LEN);
 
-	rc = bnxt_hwrm_func_buf_rgtr(bp);
+	rc = bnxt_hwrm_func_buf_rgtr(bp, num_vfs);
 	if (rc)
-		goto error_free;
+		rte_free(bp->pf->vf_req_buf);
 
-	populate_vf_func_cfg_req(bp, &req, num_vfs);
+	return rc;
+}
+
+static int
+bnxt_process_vf_resc_config_new(struct bnxt *bp, int num_vfs)
+{
+	struct hwrm_func_vf_resource_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_vf_resource_cfg_input req = {0};
+	int i, rc = 0;
 
+	bnxt_fill_vf_func_cfg_req_new(bp, &req, num_vfs);
 	bp->pf->active_vfs = 0;
 	for (i = 0; i < num_vfs; i++) {
-		add_random_mac_if_needed(bp, &req, i);
+		HWRM_PREP(&req, HWRM_FUNC_VF_RESOURCE_CFG, BNXT_USE_CHIMP_MB);
+		req.vf_id = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
+		rc = bnxt_hwrm_send_message(bp,
+					    &req,
+					    sizeof(req),
+					    BNXT_USE_CHIMP_MB);
+		if (rc || resp->error_code) {
+			PMD_DRV_LOG(ERR,
+				"Failed to initialize VF %d\n", i);
+			PMD_DRV_LOG(ERR,
+				"Not all VFs available. (%d, %d)\n",
+				rc, resp->error_code);
+			HWRM_UNLOCK();
 
+			/* If the first VF configuration itself fails,
+			 * unregister the vf_fwd_request buffer.
+			 */
+			if (i == 0)
+				bnxt_hwrm_func_buf_unrgtr(bp);
+			break;
+		}
+		HWRM_UNLOCK();
+
+		/* Update the max resource values based on the resource values
+		 * allocated to the VF.
+		 */
+		bnxt_update_max_resources(bp, i);
+		bp->pf->active_vfs++;
+		bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
+	}
+
+	return 0;
+}
+
+static int
+bnxt_process_vf_resc_config_old(struct bnxt *bp, int num_vfs)
+{
+	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_cfg_input req = {0};
+	int i, rc;
+
+	bnxt_fill_vf_func_cfg_req_old(bp, &req, num_vfs);
+
+	bp->pf->active_vfs = 0;
+	for (i = 0; i < num_vfs; i++) {
 		HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 		req.flags = rte_cpu_to_le_32(bp->pf->vf_info[i].func_cfg_flags);
 		req.fid = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
@@ -3609,40 +3635,107 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 
 		if (rc || resp->error_code) {
 			PMD_DRV_LOG(ERR,
-				"Failed to initizlie VF %d\n", i);
+				"Failed to initialize VF %d\n", i);
 			PMD_DRV_LOG(ERR,
 				"Not all VFs available. (%d, %d)\n",
 				rc, resp->error_code);
 			HWRM_UNLOCK();
+
+			/* If the first VF configuration itself fails,
+			 * unregister the vf_fwd_request buffer.
+			 */
+			if (i == 0)
+				bnxt_hwrm_func_buf_unrgtr(bp);
 			break;
 		}
 
 		HWRM_UNLOCK();
 
-		reserve_resources_from_vf(bp, &req, i);
+		/* Update the max resource values based on the resource values
+		 * allocated to the VF.
+		 */
+		bnxt_update_max_resources(bp, i);
 		bp->pf->active_vfs++;
 		bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
 	}
 
+	return 0;
+}
+
+static void
+bnxt_configure_vf_resources(struct bnxt *bp, int num_vfs)
+{
+	if (bp->flags & BNXT_FLAG_NEW_RM)
+		bnxt_process_vf_resc_config_new(bp, num_vfs);
+	else
+		bnxt_process_vf_resc_config_old(bp, num_vfs);
+}
+
+static void
+bnxt_update_pf_resources(struct bnxt *bp,
+			 struct bnxt_pf_resource_info *pf_resc)
+{
+	bp->max_rsscos_ctx = pf_resc->num_rsscos_ctxs;
+	bp->max_stat_ctx = pf_resc->num_stat_ctxs;
+	bp->max_cp_rings = pf_resc->num_cp_rings;
+	bp->max_tx_rings = pf_resc->num_tx_rings;
+	bp->max_rx_rings = pf_resc->num_rx_rings;
+	bp->max_ring_grps = pf_resc->num_hw_ring_grps;
+}
+
+static int32_t
+bnxt_configure_pf_resources(struct bnxt *bp,
+			    struct bnxt_pf_resource_info *pf_resc)
+{
 	/*
-	 * Now configure the PF to use "the rest" of the resources
-	 * We're using STD_TX_RING_MODE here though which will limit the TX
-	 * rings.  This will allow QoS to function properly.  Not setting this
+	 * We're using STD_TX_RING_MODE here which will limit the TX
+	 * rings. This will allow QoS to function properly. Not setting this
 	 * will cause PF rings to break bandwidth settings.
 	 */
-	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
+	bp->pf->func_cfg_flags &=
+		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
+		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
+	bp->pf->func_cfg_flags |=
+		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
+	return bnxt_hwrm_pf_func_cfg(bp, pf_resc);
+}
+
+int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
+{
+	struct bnxt_pf_resource_info pf_resc = { 0 };
+	int rc;
+
+	if (!BNXT_PF(bp)) {
+		PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
+		return -EINVAL;
+	}
+
+	rc = bnxt_hwrm_func_qcaps(bp);
 	if (rc)
-		goto error_free;
+		return rc;
+
+	bnxt_calculate_pf_resources(bp, &pf_resc, num_vfs);
 
-	rc = update_pf_resource_max(bp);
+	rc = bnxt_configure_pf_resources(bp, &pf_resc);
 	if (rc)
-		goto error_free;
+		return rc;
 
-	return rc;
+	rc = bnxt_query_pf_resources(bp, &pf_resc);
+	if (rc)
+		return rc;
 
-error_free:
-	bnxt_hwrm_func_buf_unrgtr(bp);
-	return rc;
+	/*
+	 * Now, create and register a buffer to hold forwarded VF requests
+	 */
+	rc = bnxt_configure_vf_req_buf(bp, num_vfs);
+	if (rc)
+		return rc;
+
+	bnxt_configure_vf_resources(bp, num_vfs);
+
+	bnxt_update_pf_resources(bp, &pf_resc);
+
+	return 0;
 }
 
 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
@@ -3747,23 +3840,24 @@ int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
 }
 
-int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
+int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp, int num_vfs)
 {
-	int rc = 0;
-	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
 	struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
+	int rc;
 
 	HWRM_PREP(&req, HWRM_FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
 
 	req.req_buf_num_pages = rte_cpu_to_le_16(1);
-	req.req_buf_page_size = rte_cpu_to_le_16(
-			 page_getenum(bp->pf->active_vfs * HWRM_MAX_REQ_LEN));
+	req.req_buf_page_size =
+		rte_cpu_to_le_16(page_getenum(num_vfs * HWRM_MAX_REQ_LEN));
 	req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
 	req.req_buf_page_addr0 =
 		rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf->vf_req_buf));
 	if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
 		PMD_DRV_LOG(ERR,
 			"unable to map buffer address to physical memory\n");
+		HWRM_UNLOCK();
 		return -ENOMEM;
 	}
 
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index e98b1fe..a7fa7f6 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -107,6 +107,16 @@ enum bnxt_flow_dir {
 	BNXT_DIR_MAX
 };
 
+struct bnxt_pf_resource_info {
+	uint16_t num_rsscos_ctxs;
+	uint16_t num_stat_ctxs;
+	uint16_t num_tx_rings;
+	uint16_t num_rx_rings;
+	uint16_t num_cp_rings;
+	uint16_t num_l2_ctxs;
+	uint32_t num_hw_ring_grps;
+};
+
 #define BNXT_CTX_VAL_INVAL	0xFFFF
 
 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp,
@@ -127,7 +137,7 @@ int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
 			      void *encaped, size_t ec_size);
 
-int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp);
+int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp, int num_vfs);
 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp);
 int bnxt_hwrm_func_driver_register(struct bnxt *bp);
 int bnxt_hwrm_func_qcaps(struct bnxt *bp);
-- 
2.7.4
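
A note on the arithmetic used by bnxt_fill_vf_func_cfg_req_new() and
bnxt_calculate_pf_resources() above: each resource pool is divided
evenly across the PF and its VFs, with the PF also absorbing the
division remainder so no resource is stranded. A standalone sketch
with illustrative numbers:

	/* Split a pool of `max` resources across one PF plus num_vfs
	 * VFs.  Example: max = 100 tx rings, num_vfs = 7:
	 *   per-VF share: 100 / 8        = 12
	 *   PF share    : 12 + (100 % 8) = 16
	 *   total       : 7 * 12 + 16    = 100
	 */
	uint16_t per_vf = max / (num_vfs + 1);
	uint16_t pf     = max / (num_vfs + 1) + max % (num_vfs + 1);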


* [dpdk-stable] [PATCH 07/13] net/bnxt: register PF for default vnic change async event
       [not found] <20201009111130.10422-1-somnath.kotur@broadcom.com>
  2020-10-09 11:11 ` [dpdk-stable] [PATCH 01/13] net/bnxt: fix the corruption of the session details Somnath Kotur
  2020-10-09 11:11 ` [dpdk-stable] [PATCH 04/13] net/bnxt: fixes for PMD PF support in SR-IOV mode Somnath Kotur
@ 2020-10-09 11:11 ` Somnath Kotur
  2020-10-09 11:11 ` [dpdk-stable] [PATCH 13/13] net/bnxt: remove parent fid validation in vnic change event processing Somnath Kotur
                   ` (4 subsequent siblings)
  7 siblings, 0 replies; 10+ messages in thread
From: Somnath Kotur @ 2020-10-09 11:11 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit, Venkat Duvvuru, stable, Somnath Kotur

From: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>

Currently, we register for this event only if the function is a
trusted VF. This patch extends the registration to PFs as well.

Fixes: 322bd6e70272 ("net/bnxt: add port representor infrastructure")
Cc: stable@dpdk.org

Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
---
 drivers/net/bnxt/bnxt_hwrm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index e49f56c..4e2061e 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -938,7 +938,7 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 		req.async_event_fwd[1] |=
 			rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION);
 
-	if (BNXT_VF_IS_TRUSTED(bp))
+	if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))
 		req.async_event_fwd[1] |=
 		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE);
 
-- 
2.7.4
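
For reference, async_event_fwd[] in the HWRM_FUNC_DRV_RGTR request is
a bitmap of async event ids, one little-endian 32-bit word per 32
ids; the DEFAULT_VNIC_CHANGE event lands in word 1 because its id
falls in the 32..63 range. A rough sketch of the general indexing
(this generic form is an editorial illustration, assuming the usual
one-bit-per-event-id layout; the driver ORs in a precomputed mask as
shown in the diff):

	/* Ask firmware to forward async event `ev` to the driver. */
	req.async_event_fwd[ev / 32] |=
		rte_cpu_to_le_32(1 << (ev % 32));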


* [dpdk-stable] [PATCH 13/13] net/bnxt: remove parent fid validation in vnic change event processing
       [not found] <20201009111130.10422-1-somnath.kotur@broadcom.com>
                   ` (2 preceding siblings ...)
  2020-10-09 11:11 ` [dpdk-stable] [PATCH 07/13] net/bnxt: register PF for default vnic change async event Somnath Kotur
@ 2020-10-09 11:11 ` Somnath Kotur
  2020-10-10  4:05 ` [dpdk-stable] [PATCH v2 01/12] net/bnxt: fix the corruption of the session details Ajit Khaparde
                   ` (3 subsequent siblings)
  7 siblings, 0 replies; 10+ messages in thread
From: Somnath Kotur @ 2020-10-09 11:11 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit, Venkat Duvvuru, stable, Somnath Kotur

From: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>

Currently, when a default vnic change async event is received, the
async event handler tries to validate that the parent fid carried in
the event data matches the control channel's fid. This validation
fails on an SR device because the parent function of the vnic and the
control channel function (the foster parent) are different.

This patch fixes the problem by removing the parent fid validation,
as it is not really needed even in the Wh+ case.

Fixes: 322bd6e70272 ("net/bnxt: add port representor infrastructure")
Cc: stable@dpdk.org

Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
---
 drivers/net/bnxt/bnxt_cpr.c | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index 5492394..91d1ffe 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -50,7 +50,7 @@ static void
 bnxt_process_default_vnic_change(struct bnxt *bp,
 				 struct hwrm_async_event_cmpl *async_cmp)
 {
-	uint16_t fid, vnic_state, parent_id, vf_fid, vf_id;
+	uint16_t vnic_state, vf_fid, vf_id;
 	struct bnxt_representor *vf_rep_bp;
 	struct rte_eth_dev *eth_dev;
 	bool vfr_found = false;
@@ -67,10 +67,7 @@ bnxt_process_default_vnic_change(struct bnxt *bp,
 	if (vnic_state != BNXT_DEFAULT_VNIC_ALLOC)
 		return;
 
-	parent_id = (event_data & BNXT_DEFAULT_VNIC_CHANGE_PF_ID_MASK) >>
-			BNXT_DEFAULT_VNIC_CHANGE_PF_ID_SFT;
-	fid = BNXT_PF(bp) ? bp->fw_fid : bp->parent->fid;
-	if (parent_id != fid || !bp->rep_info)
+	if (!bp->rep_info)
 		return;
 
 	vf_fid = (event_data & BNXT_DEFAULT_VNIC_CHANGE_VF_ID_MASK) >>
-- 
2.7.4
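
The event-data parsing that remains after this patch follows the
usual mask-and-shift pattern, using the BNXT_DEFAULT_VNIC_CHANGE_*
macros seen in the bnxt.h hunk of patch 04 earlier in this series:

	/* Extract a bit-field from the 32-bit event data word: mask
	 * off the field, then shift it down to bit 0.
	 */
	vf_fid = (event_data & BNXT_DEFAULT_VNIC_CHANGE_VF_ID_MASK) >>
		 BNXT_DEFAULT_VNIC_CHANGE_VF_ID_SFT;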


* [dpdk-stable] [PATCH v2 01/12] net/bnxt: fix the corruption of the session details
       [not found] <20201009111130.10422-1-somnath.kotur@broadcom.com>
                   ` (3 preceding siblings ...)
  2020-10-09 11:11 ` [dpdk-stable] [PATCH 13/13] net/bnxt: remove parent fid validation in vnic change event processing Somnath Kotur
@ 2020-10-10  4:05 ` Ajit Khaparde
  2020-10-10  4:05 ` [dpdk-stable] [PATCH v2 04/12] net/bnxt: fix PMD PF support in SR-IOV mode Ajit Khaparde
                   ` (2 subsequent siblings)
  7 siblings, 0 replies; 10+ messages in thread
From: Ajit Khaparde @ 2020-10-10  4:05 UTC (permalink / raw)
  To: dev; +Cc: Kishore Padmanabha, stable, Mike Baucom

From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>

The session details that are shared among multiple ports
need to be stored outside the bnxt structure.

Fixes: 70e64b27af5b ("net/bnxt: support ULP session manager cleanup")
Cc: stable@dpdk.org

Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
 drivers/net/bnxt/tf_ulp/bnxt_ulp.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
index 289619411..a4d48c71a 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
@@ -159,7 +159,9 @@ ulp_ctx_session_open(struct bnxt *bp,
 	}
 	if (!session->session_opened) {
 		session->session_opened = 1;
-		session->g_tfp = &bp->tfp;
+		session->g_tfp = rte_zmalloc("bnxt_ulp_session_tfp",
+					     sizeof(struct tf), 0);
+		session->g_tfp->session = bp->tfp.session;
 	}
 	return rc;
 }
@@ -176,6 +178,7 @@ ulp_ctx_session_close(struct bnxt *bp,
 	if (session->session_opened)
 		tf_close_session(&bp->tfp);
 	session->session_opened = 0;
+	rte_free(session->g_tfp);
 	session->g_tfp = NULL;
 }
 
-- 
2.21.1 (Apple Git-122.3)


* [dpdk-stable] [PATCH v2 04/12] net/bnxt: fix PMD PF support in SR-IOV mode
       [not found] <20201009111130.10422-1-somnath.kotur@broadcom.com>
                   ` (4 preceding siblings ...)
  2020-10-10  4:05 ` [dpdk-stable] [PATCH v2 01/12] net/bnxt: fix the corruption of the session details Ajit Khaparde
@ 2020-10-10  4:05 ` Ajit Khaparde
  2020-10-10  4:05 ` [dpdk-stable] [PATCH v2 07/12] net/bnxt: handle default vnic change async event Ajit Khaparde
       [not found] ` <20201010041153.63921-1-ajit.khaparde@broadcom.com>
  7 siblings, 0 replies; 10+ messages in thread
From: Ajit Khaparde @ 2020-10-10  4:05 UTC (permalink / raw)
  To: dev; +Cc: Venkat Duvvuru, stable, Somnath Kotur

From: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>

1. Implement HWRM_FUNC_VF_RESOURCE_CFG command and use it to
   reserve resources for VFs when NEW RM is enabled.
2. Invoke PF’s FUNC_CFG before configuring VFs resources.
3. Don’t consider max_rx_em_flows in max_l2_ctx calculation
   when VFs are configured.
4. Issue HWRM_FUNC_QCFG instead of HWRM_FUNC_QCAPS to find
   out the actual allocated resources for the VF.
5. Don’t add a random MAC address to the VF.
6. Handle completion type CMPL_BASE_TYPE_HWRM_FWD_REQ instead
   of CMPL_BASE_TYPE_HWRM_FWD_RESP.
7. Don't enable HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE
   when the list of HWRM commands that need to be forwarded
   to the PF is specified in HWRM_FUNC_DRV_RGTR.
8. Update the list of HWRM commands that can be forwarded to
   the PF.

Fixes: b7778e8a1c00 ("net/bnxt: refactor to properly allocate resources for PF/VF")
Cc: stable@dpdk.org

Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
 drivers/net/bnxt/bnxt.h        |   6 +-
 drivers/net/bnxt/bnxt_cpr.c    |   6 +-
 drivers/net/bnxt/bnxt_ethdev.c |  40 +--
 drivers/net/bnxt/bnxt_hwrm.c   | 461 ++++++++++++++++++++-------------
 drivers/net/bnxt/bnxt_hwrm.h   |  12 +-
 5 files changed, 309 insertions(+), 216 deletions(-)

diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index eca74486e..a951bca7a 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -167,6 +167,9 @@
 #define	BNXT_DEFAULT_VNIC_CHANGE_VF_ID_SFT		\
 	HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT
 
+#define BNXT_HWRM_CMD_TO_FORWARD(cmd)	\
+		(bp->pf->vf_req_fwd[(cmd) / 32] |= (1 << ((cmd) % 32)))
+
 struct bnxt_led_info {
 	uint8_t	     num_leds;
 	uint8_t      led_id;
@@ -664,9 +667,10 @@ struct bnxt {
 #define BNXT_FW_CAP_IF_CHANGE		BIT(1)
 #define BNXT_FW_CAP_ERROR_RECOVERY	BIT(2)
 #define BNXT_FW_CAP_ERR_RECOVER_RELOAD	BIT(3)
+#define BNXT_FW_CAP_HCOMM_FW_STATUS	BIT(4)
 #define BNXT_FW_CAP_ADV_FLOW_MGMT	BIT(5)
 #define BNXT_FW_CAP_ADV_FLOW_COUNTERS	BIT(6)
-#define BNXT_FW_CAP_HCOMM_FW_STATUS	BIT(7)
+#define BNXT_FW_CAP_LINK_ADMIN		BIT(7)
 
 	pthread_mutex_t         flow_lock;
 
diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index a3a7e6ab7..54923948f 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -239,7 +239,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
 		goto reject;
 	}
 
-	if (bnxt_rcv_msg_from_vf(bp, vf_id, fwd_cmd) == true) {
+	if (bnxt_rcv_msg_from_vf(bp, vf_id, fwd_cmd)) {
 		/*
 		 * In older firmware versions, the MAC had to be all zeros for
 		 * the VF to set it's MAC via hwrm_func_vf_cfg. Set to all
@@ -254,6 +254,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
 				(const uint8_t *)"\x00\x00\x00\x00\x00");
 			}
 		}
+
 		if (fwd_cmd->req_type == HWRM_CFA_L2_SET_RX_MASK) {
 			struct hwrm_cfa_l2_set_rx_mask_input *srm =
 							(void *)fwd_cmd;
@@ -265,6 +266,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
 			    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN |
 			    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN);
 		}
+
 		/* Forward */
 		rc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
 		if (rc) {
@@ -306,7 +308,7 @@ int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp)
 		bnxt_handle_async_event(bp, cmp);
 		evt = 1;
 		break;
-	case CMPL_BASE_TYPE_HWRM_FWD_RESP:
+	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
 		/* Handle HWRM forwarded responses */
 		bnxt_handle_fwd_req(bp, cmp);
 		evt = 1;
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 8b63134c3..b4654ec6a 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -5208,37 +5208,14 @@ static void bnxt_config_vf_req_fwd(struct bnxt *bp)
 	if (!BNXT_PF(bp))
 		return;
 
-#define ALLOW_FUNC(x)	\
-	{ \
-		uint32_t arg = (x); \
-		bp->pf->vf_req_fwd[((arg) >> 5)] &= \
-		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
-	}
-
-	/* Forward all requests if firmware is new enough */
-	if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
-	     (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
-	    ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
-		memset(bp->pf->vf_req_fwd, 0xff, sizeof(bp->pf->vf_req_fwd));
-	} else {
-		PMD_DRV_LOG(WARNING,
-			    "Firmware too old for VF mailbox functionality\n");
-		memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));
-	}
+	memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));
 
-	/*
-	 * The following are used for driver cleanup. If we disallow these,
-	 * VF drivers can't clean up cleanly.
-	 */
-	ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
-	ALLOW_FUNC(HWRM_VNIC_FREE);
-	ALLOW_FUNC(HWRM_RING_FREE);
-	ALLOW_FUNC(HWRM_RING_GRP_FREE);
-	ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
-	ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
-	ALLOW_FUNC(HWRM_STAT_CTX_FREE);
-	ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
-	ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
+	if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN))
+		BNXT_HWRM_CMD_TO_FORWARD(HWRM_PORT_PHY_QCFG);
+	BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_CFG);
+	BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_VF_CFG);
+	BNXT_HWRM_CMD_TO_FORWARD(HWRM_CFA_L2_FILTER_ALLOC);
+	BNXT_HWRM_CMD_TO_FORWARD(HWRM_OEM_CMD);
 }
 
 uint16_t
@@ -6189,7 +6166,10 @@ bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
 
 	bnxt_free_int(bp);
 	bnxt_free_mem(bp, reconfig_dev);
+
 	bnxt_hwrm_func_buf_unrgtr(bp);
+	rte_free(bp->pf->vf_req_buf);
+
 	rc = bnxt_hwrm_func_driver_unregister(bp, 0);
 	bp->flags &= ~BNXT_FLAG_REGISTERED;
 	bnxt_free_ctx_mem(bp);
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index faeaf4b5d..8133afc74 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -765,7 +765,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
 	bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
 	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
-	if (!BNXT_CHIP_THOR(bp))
+	if (!BNXT_CHIP_THOR(bp) && !bp->pdev->max_vfs)
 		bp->max_l2_ctx += bp->max_rx_em_flows;
 	/* TODO: For now, do not support VMDq/RFS on VFs. */
 	if (BNXT_PF(bp)) {
@@ -803,6 +803,9 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
 		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
 
+	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
+		bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
+
 	HWRM_UNLOCK();
 
 	return rc;
@@ -818,16 +821,15 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 		if (rc)
 			return rc;
 
+		/* On older FW,
+		 * bnxt_hwrm_func_resc_qcaps can fail and cause init failure.
+		 * But the error can be ignored. Return success.
+		 */
 		rc = bnxt_hwrm_func_resc_qcaps(bp);
 		if (!rc)
 			bp->flags |= BNXT_FLAG_NEW_RM;
 	}
 
-	/* On older FW,
-	 * bnxt_hwrm_func_resc_qcaps can fail and cause init failure.
-	 * But the error can be ignored. Return success.
-	 */
-
 	return 0;
 }
 
@@ -916,14 +918,6 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 		memcpy(req.vf_req_fwd, bp->pf->vf_req_fwd,
 		       RTE_MIN(sizeof(req.vf_req_fwd),
 			       sizeof(bp->pf->vf_req_fwd)));
-
-		/*
-		 * PF can sniff HWRM API issued by VF. This can be set up by
-		 * linux driver and inherited by the DPDK PF driver. Clear
-		 * this HWRM sniffer list in FW because DPDK PF driver does
-		 * not support this.
-		 */
-		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE;
 	}
 
 	req.flags = rte_cpu_to_le_32(flags);
@@ -1052,21 +1046,19 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
 
 	HWRM_CHECK_RESULT_SILENT();
 
-	if (BNXT_VF(bp)) {
-		bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
-		bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
-		bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
-		bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
-		bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
-		/* func_resource_qcaps does not return max_rx_em_flows.
-		 * So use the value provided by func_qcaps.
-		 */
-		bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
-		if (!BNXT_CHIP_THOR(bp))
-			bp->max_l2_ctx += bp->max_rx_em_flows;
-		bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
-		bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
-	}
+	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
+	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
+	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
+	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
+	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
+	/* func_resource_qcaps does not return max_rx_em_flows.
+	 * So use the value provided by func_qcaps.
+	 */
+	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+	if (!BNXT_CHIP_THOR(bp) && !bp->pdev->max_vfs)
+		bp->max_l2_ctx += bp->max_rx_em_flows;
+	bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
+	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
 	bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
 	bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
 	if (bp->vf_resv_strategy >
@@ -3300,33 +3292,8 @@ int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp)
 	return 0;
 }
 
-static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
-				   struct hwrm_func_qcaps_output *qcaps)
-{
-	qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
-	memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
-	       sizeof(qcaps->mac_address));
-	qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
-	qcaps->max_rx_rings = fcfg->num_rx_rings;
-	qcaps->max_tx_rings = fcfg->num_tx_rings;
-	qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
-	qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
-	qcaps->max_vfs = 0;
-	qcaps->first_vf_id = 0;
-	qcaps->max_vnics = fcfg->num_vnics;
-	qcaps->max_decap_records = 0;
-	qcaps->max_encap_records = 0;
-	qcaps->max_tx_wm_flows = 0;
-	qcaps->max_tx_em_flows = 0;
-	qcaps->max_rx_wm_flows = 0;
-	qcaps->max_rx_em_flows = 0;
-	qcaps->max_flow_id = 0;
-	qcaps->max_mcast_filters = fcfg->num_mcast_filters;
-	qcaps->max_sp_tx_rings = 0;
-	qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
-}
-
-static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
+static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp,
+				 struct bnxt_pf_resource_info *pf_resc)
 {
 	struct hwrm_func_cfg_input req = {0};
 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
@@ -3345,7 +3312,8 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
 
 	if (BNXT_HAS_RING_GRPS(bp)) {
 		enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
-		req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
+		req.num_hw_ring_grps =
+			rte_cpu_to_le_16(pf_resc->num_hw_ring_grps);
 	} else if (BNXT_HAS_NQ(bp)) {
 		enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
 		req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
@@ -3354,12 +3322,12 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
 	req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
 	req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
 	req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
-	req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
-	req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
-	req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
-	req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
-	req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
-	req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
+	req.num_rsscos_ctxs = rte_cpu_to_le_16(pf_resc->num_rsscos_ctxs);
+	req.num_stat_ctxs = rte_cpu_to_le_16(pf_resc->num_stat_ctxs);
+	req.num_cmpl_rings = rte_cpu_to_le_16(pf_resc->num_cp_rings);
+	req.num_tx_rings = rte_cpu_to_le_16(pf_resc->num_tx_rings);
+	req.num_rx_rings = rte_cpu_to_le_16(pf_resc->num_rx_rings);
+	req.num_l2_ctxs = rte_cpu_to_le_16(pf_resc->num_l2_ctxs);
 	req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
 	req.fid = rte_cpu_to_le_16(0xffff);
 	req.enables = rte_cpu_to_le_32(enables);
@@ -3374,9 +3342,43 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
 	return rc;
 }
 
-static void populate_vf_func_cfg_req(struct bnxt *bp,
-				     struct hwrm_func_cfg_input *req,
-				     int num_vfs)
+/* min values are the guaranteed resources and max values are subject
+ * to availability. The strategy for now is to keep both min & max
+ * values the same.
+ */
+static void
+bnxt_fill_vf_func_cfg_req_new(struct bnxt *bp,
+			      struct hwrm_func_vf_resource_cfg_input *req,
+			      int num_vfs)
+{
+	req->max_rsscos_ctx = rte_cpu_to_le_16(bp->max_rsscos_ctx /
+					       (num_vfs + 1));
+	req->min_rsscos_ctx = req->max_rsscos_ctx;
+	req->max_stat_ctx = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
+	req->min_stat_ctx = req->max_stat_ctx;
+	req->max_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
+					       (num_vfs + 1));
+	req->min_cmpl_rings = req->max_cmpl_rings;
+	req->max_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
+	req->min_tx_rings = req->max_tx_rings;
+	req->max_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
+	req->min_rx_rings = req->max_rx_rings;
+	req->max_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
+	req->min_l2_ctxs = req->max_l2_ctxs;
+	/* TODO: For now, do not support VMDq/RFS on VFs. */
+	req->max_vnics = rte_cpu_to_le_16(1);
+	req->min_vnics = req->max_vnics;
+	req->max_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
+						 (num_vfs + 1));
+	req->min_hw_ring_grps = req->max_hw_ring_grps;
+	req->flags =
+	 rte_cpu_to_le_16(HWRM_FUNC_VF_RESOURCE_CFG_INPUT_FLAGS_MIN_GUARANTEED);
+}
+
+static void
+bnxt_fill_vf_func_cfg_req_old(struct bnxt *bp,
+			      struct hwrm_func_cfg_input *req,
+			      int num_vfs)
 {
 	req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
 			HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
@@ -3407,60 +3409,29 @@ static void populate_vf_func_cfg_req(struct bnxt *bp,
 						 (num_vfs + 1));
 }
 
-static void add_random_mac_if_needed(struct bnxt *bp,
-				     struct hwrm_func_cfg_input *cfg_req,
-				     int vf)
-{
-	struct rte_ether_addr mac;
-
-	if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
-		return;
-
-	if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00", 6) == 0) {
-		cfg_req->enables |=
-		rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
-		rte_eth_random_addr(cfg_req->dflt_mac_addr);
-		bp->pf->vf_info[vf].random_mac = true;
-	} else {
-		memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,
-			RTE_ETHER_ADDR_LEN);
-	}
-}
-
-static int reserve_resources_from_vf(struct bnxt *bp,
-				     struct hwrm_func_cfg_input *cfg_req,
+/* Update the port wide resource values based on how many resources
+ * got allocated to the VF.
+ */
+static int bnxt_update_max_resources(struct bnxt *bp,
 				     int vf)
 {
-	struct hwrm_func_qcaps_input req = {0};
-	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_qcfg_input req = {0};
+	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 	int rc;
 
 	/* Get the actual allocated values now */
-	HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+	HWRM_CHECK_RESULT();
 
-	if (rc) {
-		PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
-		copy_func_cfg_to_qcaps(cfg_req, resp);
-	} else if (resp->error_code) {
-		rc = rte_le_to_cpu_16(resp->error_code);
-		PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
-		copy_func_cfg_to_qcaps(cfg_req, resp);
-	}
-
-	bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
-	bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
-	bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
-	bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
-	bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
-	bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
-	/*
-	 * TODO: While not supporting VMDq with VFs, max_vnics is always
-	 * forced to 1 in this case
-	 */
-	//bp->max_vnics -= rte_le_to_cpu_16(esp->max_vnics);
-	bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
+	bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
+	bp->max_stat_ctx -= rte_le_to_cpu_16(resp->alloc_stat_ctx);
+	bp->max_cp_rings -= rte_le_to_cpu_16(resp->alloc_cmpl_rings);
+	bp->max_tx_rings -= rte_le_to_cpu_16(resp->alloc_tx_rings);
+	bp->max_rx_rings -= rte_le_to_cpu_16(resp->alloc_rx_rings);
+	bp->max_l2_ctx -= rte_le_to_cpu_16(resp->alloc_l2_ctx);
+	bp->max_ring_grps -= rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
 
 	HWRM_UNLOCK();
 
@@ -3485,7 +3456,8 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
 	return rc;
 }
 
-static int update_pf_resource_max(struct bnxt *bp)
+static int bnxt_query_pf_resources(struct bnxt *bp,
+				   struct bnxt_pf_resource_info *pf_resc)
 {
 	struct hwrm_func_qcfg_input req = {0};
 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
@@ -3497,8 +3469,13 @@ static int update_pf_resource_max(struct bnxt *bp)
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 	HWRM_CHECK_RESULT();
 
-	/* Only TX ring value reflects actual allocation? TODO */
-	bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
+	pf_resc->num_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
+	pf_resc->num_rsscos_ctxs = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
+	pf_resc->num_stat_ctxs = rte_le_to_cpu_16(resp->alloc_stat_ctx);
+	pf_resc->num_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
+	pf_resc->num_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
+	pf_resc->num_l2_ctxs = rte_le_to_cpu_16(resp->alloc_l2_ctx);
+	pf_resc->num_hw_ring_grps = rte_le_to_cpu_32(resp->alloc_hw_ring_grps);
 	bp->pf->evb_mode = resp->evb_mode;
 
 	HWRM_UNLOCK();
@@ -3506,8 +3483,42 @@ static int update_pf_resource_max(struct bnxt *bp)
 	return rc;
 }
 
+static void
+bnxt_calculate_pf_resources(struct bnxt *bp,
+			    struct bnxt_pf_resource_info *pf_resc,
+			    int num_vfs)
+{
+	if (!num_vfs) {
+		pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx;
+		pf_resc->num_stat_ctxs = bp->max_stat_ctx;
+		pf_resc->num_cp_rings = bp->max_cp_rings;
+		pf_resc->num_tx_rings = bp->max_tx_rings;
+		pf_resc->num_rx_rings = bp->max_rx_rings;
+		pf_resc->num_l2_ctxs = bp->max_l2_ctx;
+		pf_resc->num_hw_ring_grps = bp->max_ring_grps;
+
+		return;
+	}
+
+	pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx / (num_vfs + 1) +
+				   bp->max_rsscos_ctx % (num_vfs + 1);
+	pf_resc->num_stat_ctxs = bp->max_stat_ctx / (num_vfs + 1) +
+				 bp->max_stat_ctx % (num_vfs + 1);
+	pf_resc->num_cp_rings = bp->max_cp_rings / (num_vfs + 1) +
+				bp->max_cp_rings % (num_vfs + 1);
+	pf_resc->num_tx_rings = bp->max_tx_rings / (num_vfs + 1) +
+				bp->max_tx_rings % (num_vfs + 1);
+	pf_resc->num_rx_rings = bp->max_rx_rings / (num_vfs + 1) +
+				bp->max_rx_rings % (num_vfs + 1);
+	pf_resc->num_l2_ctxs = bp->max_l2_ctx / (num_vfs + 1) +
+			       bp->max_l2_ctx % (num_vfs + 1);
+	pf_resc->num_hw_ring_grps = bp->max_ring_grps / (num_vfs + 1) +
+				    bp->max_ring_grps % (num_vfs + 1);
+}
+
 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
 {
+	struct bnxt_pf_resource_info pf_resc = { 0 };
 	int rc;
 
 	if (!BNXT_PF(bp)) {
@@ -3519,82 +3530,100 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
 	if (rc)
 		return rc;
 
+	bnxt_calculate_pf_resources(bp, &pf_resc, 0);
+
 	bp->pf->func_cfg_flags &=
 		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
 		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
 	bp->pf->func_cfg_flags |=
 		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
-	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
+	rc = bnxt_hwrm_pf_func_cfg(bp, &pf_resc);
 	rc = __bnxt_hwrm_func_qcaps(bp);
 	return rc;
 }
 
-int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
+static int
+bnxt_configure_vf_req_buf(struct bnxt *bp, int num_vfs)
 {
-	struct hwrm_func_cfg_input req = {0};
-	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
-	int i;
-	size_t sz;
-	int rc = 0;
-	size_t req_buf_sz;
-
-	if (!BNXT_PF(bp)) {
-		PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
-		return -EINVAL;
-	}
-
-	rc = bnxt_hwrm_func_qcaps(bp);
-
-	if (rc)
-		return rc;
-
-	bp->pf->active_vfs = num_vfs;
-
-	/*
-	 * First, configure the PF to only use one TX ring.  This ensures that
-	 * there are enough rings for all VFs.
-	 *
-	 * If we don't do this, when we call func_alloc() later, we will lock
-	 * extra rings to the PF that won't be available during func_cfg() of
-	 * the VFs.
-	 *
-	 * This has been fixed with firmware versions above 20.6.54
-	 */
-	bp->pf->func_cfg_flags &=
-		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
-		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
-	bp->pf->func_cfg_flags |=
-		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
-	rc = bnxt_hwrm_pf_func_cfg(bp, 1);
-	if (rc)
-		return rc;
+	size_t req_buf_sz, sz;
+	int i, rc;
 
-	/*
-	 * Now, create and register a buffer to hold forwarded VF requests
-	 */
 	req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
 	bp->pf->vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
 		page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
 	if (bp->pf->vf_req_buf == NULL) {
-		rc = -ENOMEM;
-		goto error_free;
+		return -ENOMEM;
 	}
+
 	for (sz = 0; sz < req_buf_sz; sz += getpagesize())
 		rte_mem_lock_page(((char *)bp->pf->vf_req_buf) + sz);
+
 	for (i = 0; i < num_vfs; i++)
 		bp->pf->vf_info[i].req_buf = ((char *)bp->pf->vf_req_buf) +
-					(i * HWRM_MAX_REQ_LEN);
+					     (i * HWRM_MAX_REQ_LEN);
 
-	rc = bnxt_hwrm_func_buf_rgtr(bp);
+	rc = bnxt_hwrm_func_buf_rgtr(bp, num_vfs);
 	if (rc)
-		goto error_free;
+		rte_free(bp->pf->vf_req_buf);
+
+	return rc;
+}
 
-	populate_vf_func_cfg_req(bp, &req, num_vfs);
+static int
+bnxt_process_vf_resc_config_new(struct bnxt *bp, int num_vfs)
+{
+	struct hwrm_func_vf_resource_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_vf_resource_cfg_input req = {0};
+	int i, rc = 0;
 
+	bnxt_fill_vf_func_cfg_req_new(bp, &req, num_vfs);
 	bp->pf->active_vfs = 0;
 	for (i = 0; i < num_vfs; i++) {
-		add_random_mac_if_needed(bp, &req, i);
+		HWRM_PREP(&req, HWRM_FUNC_VF_RESOURCE_CFG, BNXT_USE_CHIMP_MB);
+		req.vf_id = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
+		rc = bnxt_hwrm_send_message(bp,
+					    &req,
+					    sizeof(req),
+					    BNXT_USE_CHIMP_MB);
+		if (rc || resp->error_code) {
+			PMD_DRV_LOG(ERR,
+				"Failed to initialize VF %d\n", i);
+			PMD_DRV_LOG(ERR,
+				"Not all VFs available. (%d, %d)\n",
+				rc, resp->error_code);
+			HWRM_UNLOCK();
+
+			/* If the first VF configuration itself fails,
+			 * unregister the vf_fwd_request buffer.
+			 */
+			if (i == 0)
+				bnxt_hwrm_func_buf_unrgtr(bp);
+			break;
+		}
+		HWRM_UNLOCK();
+
+		/* Update the max resource values based on the resource values
+		 * allocated to the VF.
+		 */
+		bnxt_update_max_resources(bp, i);
+		bp->pf->active_vfs++;
+		bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
+	}
+
+	return 0;
+}
+
+static int
+bnxt_process_vf_resc_config_old(struct bnxt *bp, int num_vfs)
+{
+	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_cfg_input req = {0};
+	int i, rc;
 
+	bnxt_fill_vf_func_cfg_req_old(bp, &req, num_vfs);
+
+	bp->pf->active_vfs = 0;
+	for (i = 0; i < num_vfs; i++) {
 		HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 		req.flags = rte_cpu_to_le_32(bp->pf->vf_info[i].func_cfg_flags);
 		req.fid = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
@@ -3609,40 +3638,107 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 
 		if (rc || resp->error_code) {
 			PMD_DRV_LOG(ERR,
-				"Failed to initizlie VF %d\n", i);
+				"Failed to initialize VF %d\n", i);
 			PMD_DRV_LOG(ERR,
 				"Not all VFs available. (%d, %d)\n",
 				rc, resp->error_code);
 			HWRM_UNLOCK();
+
+			/* If the first VF configuration itself fails,
+			 * unregister the vf_fwd_request buffer.
+			 */
+			if (i == 0)
+				bnxt_hwrm_func_buf_unrgtr(bp);
 			break;
 		}
 
 		HWRM_UNLOCK();
 
-		reserve_resources_from_vf(bp, &req, i);
+		/* Update the max resource values based on the resource values
+		 * allocated to the VF.
+		 */
+		bnxt_update_max_resources(bp, i);
 		bp->pf->active_vfs++;
 		bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
 	}
 
+	return 0;
+}
+
+static void
+bnxt_configure_vf_resources(struct bnxt *bp, int num_vfs)
+{
+	if (bp->flags & BNXT_FLAG_NEW_RM)
+		bnxt_process_vf_resc_config_new(bp, num_vfs);
+	else
+		bnxt_process_vf_resc_config_old(bp, num_vfs);
+}
+
+static void
+bnxt_update_pf_resources(struct bnxt *bp,
+			 struct bnxt_pf_resource_info *pf_resc)
+{
+	bp->max_rsscos_ctx = pf_resc->num_rsscos_ctxs;
+	bp->max_stat_ctx = pf_resc->num_stat_ctxs;
+	bp->max_cp_rings = pf_resc->num_cp_rings;
+	bp->max_tx_rings = pf_resc->num_tx_rings;
+	bp->max_rx_rings = pf_resc->num_rx_rings;
+	bp->max_ring_grps = pf_resc->num_hw_ring_grps;
+}
+
+static int32_t
+bnxt_configure_pf_resources(struct bnxt *bp,
+			    struct bnxt_pf_resource_info *pf_resc)
+{
 	/*
-	 * Now configure the PF to use "the rest" of the resources
-	 * We're using STD_TX_RING_MODE here though which will limit the TX
-	 * rings.  This will allow QoS to function properly.  Not setting this
+	 * We're using STD_TX_RING_MODE here which will limit the TX
+	 * rings. This will allow QoS to function properly. Not setting this
 	 * will cause PF rings to break bandwidth settings.
 	 */
-	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
+	bp->pf->func_cfg_flags &=
+		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
+		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
+	bp->pf->func_cfg_flags |=
+		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
+	return bnxt_hwrm_pf_func_cfg(bp, pf_resc);
+}
+
+int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
+{
+	struct bnxt_pf_resource_info pf_resc = { 0 };
+	int rc;
+
+	if (!BNXT_PF(bp)) {
+		PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
+		return -EINVAL;
+	}
+
+	rc = bnxt_hwrm_func_qcaps(bp);
 	if (rc)
-		goto error_free;
+		return rc;
+
+	bnxt_calculate_pf_resources(bp, &pf_resc, num_vfs);
 
-	rc = update_pf_resource_max(bp);
+	rc = bnxt_configure_pf_resources(bp, &pf_resc);
 	if (rc)
-		goto error_free;
+		return rc;
 
-	return rc;
+	rc = bnxt_query_pf_resources(bp, &pf_resc);
+	if (rc)
+		return rc;
 
-error_free:
-	bnxt_hwrm_func_buf_unrgtr(bp);
-	return rc;
+	/*
+	 * Now, create and register a buffer to hold forwarded VF requests
+	 */
+	rc = bnxt_configure_vf_req_buf(bp, num_vfs);
+	if (rc)
+		return rc;
+
+	bnxt_configure_vf_resources(bp, num_vfs);
+
+	bnxt_update_pf_resources(bp, &pf_resc);
+
+	return 0;
 }
 
 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
@@ -3747,23 +3843,24 @@ int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
 }
 
-int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
+int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp, int num_vfs)
 {
-	int rc = 0;
-	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
 	struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
+	int rc;
 
 	HWRM_PREP(&req, HWRM_FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
 
 	req.req_buf_num_pages = rte_cpu_to_le_16(1);
-	req.req_buf_page_size = rte_cpu_to_le_16(
-			 page_getenum(bp->pf->active_vfs * HWRM_MAX_REQ_LEN));
+	req.req_buf_page_size =
+		rte_cpu_to_le_16(page_getenum(num_vfs * HWRM_MAX_REQ_LEN));
 	req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
 	req.req_buf_page_addr0 =
 		rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf->vf_req_buf));
 	if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
 		PMD_DRV_LOG(ERR,
 			"unable to map buffer address to physical memory\n");
+		HWRM_UNLOCK();
 		return -ENOMEM;
 	}
 
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index e98b1fe41..a7fa7f66b 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -107,6 +107,16 @@ enum bnxt_flow_dir {
 	BNXT_DIR_MAX
 };
 
+struct bnxt_pf_resource_info {
+	uint16_t num_rsscos_ctxs;
+	uint16_t num_stat_ctxs;
+	uint16_t num_tx_rings;
+	uint16_t num_rx_rings;
+	uint16_t num_cp_rings;
+	uint16_t num_l2_ctxs;
+	uint32_t num_hw_ring_grps;
+};
+
 #define BNXT_CTX_VAL_INVAL	0xFFFF
 
 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp,
@@ -127,7 +137,7 @@ int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
 			      void *encaped, size_t ec_size);
 
-int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp);
+int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp, int num_vfs);
 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp);
 int bnxt_hwrm_func_driver_register(struct bnxt *bp);
 int bnxt_hwrm_func_qcaps(struct bnxt *bp);
-- 
2.21.1 (Apple Git-122.3)


^ permalink raw reply	[flat|nested] 10+ messages in thread

* [dpdk-stable] [PATCH v2 07/12] net/bnxt: handle default vnic change async event
       [not found] <20201009111130.10422-1-somnath.kotur@broadcom.com>
                   ` (5 preceding siblings ...)
  2020-10-10  4:05 ` [dpdk-stable] [PATCH v2 04/12] net/bnxt: fix PMD PF support in SR-IOV mode Ajit Khaparde
@ 2020-10-10  4:05 ` Ajit Khaparde
       [not found] ` <20201010041153.63921-1-ajit.khaparde@broadcom.com>
  7 siblings, 0 replies; 10+ messages in thread
From: Ajit Khaparde @ 2020-10-10  4:05 UTC (permalink / raw)
  To: dev; +Cc: Venkat Duvvuru, stable, Somnath Kotur

From: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>

Currently, we register for this event only if the function
is a trusted VF. This patch extends the registration to PFs as well.
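
For illustration, a minimal sketch of the resulting registration
condition (lifted from the diff below; the surrounding driver context
is assumed, this is not a complete function):

	/* Request the default-VNIC-change async event from firmware
	 * for PFs as well as trusted VFs. */
	if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))
		req.async_event_fwd[1] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE);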

Fixes: 322bd6e70272 ("net/bnxt: add port representor infrastructure")
Cc: stable@dpdk.org

Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
 drivers/net/bnxt/bnxt_cpr.c  | 7 ++-----
 drivers/net/bnxt/bnxt_hwrm.c | 2 +-
 2 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index 54923948f..91d1ffe46 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -50,7 +50,7 @@ static void
 bnxt_process_default_vnic_change(struct bnxt *bp,
 				 struct hwrm_async_event_cmpl *async_cmp)
 {
-	uint16_t fid, vnic_state, parent_id, vf_fid, vf_id;
+	uint16_t vnic_state, vf_fid, vf_id;
 	struct bnxt_representor *vf_rep_bp;
 	struct rte_eth_dev *eth_dev;
 	bool vfr_found = false;
@@ -67,10 +67,7 @@ bnxt_process_default_vnic_change(struct bnxt *bp,
 	if (vnic_state != BNXT_DEFAULT_VNIC_ALLOC)
 		return;
 
-	parent_id = (event_data & BNXT_DEFAULT_VNIC_CHANGE_PF_ID_MASK) >>
-			BNXT_DEFAULT_VNIC_CHANGE_PF_ID_SFT;
-	fid = BNXT_PF(bp) ? bp->fw_fid : bp->parent->fid;
-	if (parent_id != fid || !bp->rep_info)
+	if (!bp->rep_info)
 		return;
 
 	vf_fid = (event_data & BNXT_DEFAULT_VNIC_CHANGE_VF_ID_MASK) >>
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 8133afc74..eef282b69 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -938,7 +938,7 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 		req.async_event_fwd[1] |=
 			rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION);
 
-	if (BNXT_VF_IS_TRUSTED(bp))
+	if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))
 		req.async_event_fwd[1] |=
 		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE);
 
-- 
2.21.1 (Apple Git-122.3)


^ permalink raw reply	[flat|nested] 10+ messages in thread

* [dpdk-stable] [PATCH v2 01/12] net/bnxt: fix the corruption of the session details
       [not found] ` <20201010041153.63921-1-ajit.khaparde@broadcom.com>
@ 2020-10-10  4:11   ` Ajit Khaparde
  2020-10-10  4:11   ` [dpdk-stable] [PATCH v2 04/12] net/bnxt: fix PMD PF support in SR-IOV mode Ajit Khaparde
  2020-10-10  4:11   ` [dpdk-stable] [PATCH v2 07/12] net/bnxt: handle default vnic change async event Ajit Khaparde
  2 siblings, 0 replies; 10+ messages in thread
From: Ajit Khaparde @ 2020-10-10  4:11 UTC (permalink / raw)
  To: dev; +Cc: Kishore Padmanabha, stable, Mike Baucom

From: Kishore Padmanabha <kishore.padmanabha@broadcom.com>

The session details that are shared among multiple ports
need to live outside the bnxt structure.
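
A minimal sketch of the ownership change (based on the diff below;
error handling omitted):

	/* Before: the shared session aliased one port's private
	 * bp->tfp, so tearing that port down corrupted the session
	 * state seen by the other ports. */
	session->g_tfp = &bp->tfp;

	/* After: the session owns an independently allocated struct tf
	 * and only copies the opaque session handle out of the port. */
	session->g_tfp = rte_zmalloc("bnxt_ulp_session_tfp",
				     sizeof(struct tf), 0);
	session->g_tfp->session = bp->tfp.session;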

Fixes: 70e64b27af5b ("net/bnxt: support ULP session manager cleanup")
Cc: stable@dpdk.org

Signed-off-by: Kishore Padmanabha <kishore.padmanabha@broadcom.com>
Reviewed-by: Mike Baucom <michael.baucom@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
 drivers/net/bnxt/tf_ulp/bnxt_ulp.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
index 289619411..a4d48c71a 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
@@ -159,7 +159,9 @@ ulp_ctx_session_open(struct bnxt *bp,
 	}
 	if (!session->session_opened) {
 		session->session_opened = 1;
-		session->g_tfp = &bp->tfp;
+		session->g_tfp = rte_zmalloc("bnxt_ulp_session_tfp",
+					     sizeof(struct tf), 0);
+		session->g_tfp->session = bp->tfp.session;
 	}
 	return rc;
 }
@@ -176,6 +178,7 @@ ulp_ctx_session_close(struct bnxt *bp,
 	if (session->session_opened)
 		tf_close_session(&bp->tfp);
 	session->session_opened = 0;
+	rte_free(session->g_tfp);
 	session->g_tfp = NULL;
 }
 
-- 
2.21.1 (Apple Git-122.3)


^ permalink raw reply	[flat|nested] 10+ messages in thread

* [dpdk-stable] [PATCH v2 04/12] net/bnxt: fix PMD PF support in SR-IOV mode
       [not found] ` <20201010041153.63921-1-ajit.khaparde@broadcom.com>
  2020-10-10  4:11   ` [dpdk-stable] [PATCH v2 01/12] net/bnxt: fix the corruption of the session details Ajit Khaparde
@ 2020-10-10  4:11   ` Ajit Khaparde
  2020-10-10  4:11   ` [dpdk-stable] [PATCH v2 07/12] net/bnxt: handle default vnic change async event Ajit Khaparde
  2 siblings, 0 replies; 10+ messages in thread
From: Ajit Khaparde @ 2020-10-10  4:11 UTC (permalink / raw)
  To: dev; +Cc: Venkat Duvvuru, stable, Somnath Kotur

From: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>

1. Implement the HWRM_FUNC_VF_RESOURCE_CFG command and use it to
   reserve resources for VFs when NEW RM is enabled (a sketch of
   the resource split follows this list).
2. Invoke the PF's FUNC_CFG before configuring VF resources.
3. Don't consider max_rx_em_flows in the max_l2_ctx calculation
   when VFs are configured.
4. Issue HWRM_FUNC_QCFG instead of HWRM_FUNC_QCAPS to find
   out the actual resources allocated to a VF.
5. Don't add a random MAC to the VF.
6. Handle completion type CMPL_BASE_TYPE_HWRM_FWD_REQ instead
   of CMPL_BASE_TYPE_HWRM_FWD_RESP.
7. Don't enable HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE
   when the list of HWRM commands that need to be forwarded
   to the PF is specified in HWRM_FUNC_DRV_RGTR.
8. Update the list of HWRM commands that can be forwarded to the
   PF.
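
A sketch of the resource split used in this patch (the same pattern
is applied to every resource type; the example numbers below are
hypothetical):

	/* Each function (the PF plus num_vfs VFs) gets an equal share
	 * of the pool, and the PF additionally keeps the remainder.
	 * E.g. with max_tx_rings = 17 and num_vfs = 3: each VF gets
	 * 17 / 4 = 4 rings, and the PF gets 4 + (17 % 4) = 5. */
	pf_resc->num_tx_rings = bp->max_tx_rings / (num_vfs + 1) +
				bp->max_tx_rings % (num_vfs + 1);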

Fixes: b7778e8a1c00 ("net/bnxt: refactor to properly allocate resources for PF/VF")
Cc: stable@dpdk.org

Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
 drivers/net/bnxt/bnxt.h        |   6 +-
 drivers/net/bnxt/bnxt_cpr.c    |   6 +-
 drivers/net/bnxt/bnxt_ethdev.c |  40 +--
 drivers/net/bnxt/bnxt_hwrm.c   | 461 ++++++++++++++++++++-------------
 drivers/net/bnxt/bnxt_hwrm.h   |  12 +-
 5 files changed, 309 insertions(+), 216 deletions(-)

diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index eca74486e..a951bca7a 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -167,6 +167,9 @@
 #define	BNXT_DEFAULT_VNIC_CHANGE_VF_ID_SFT		\
 	HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT
 
+#define BNXT_HWRM_CMD_TO_FORWARD(cmd)	\
+		(bp->pf->vf_req_fwd[(cmd) / 32] |= (1 << ((cmd) % 32)))
+
 struct bnxt_led_info {
 	uint8_t	     num_leds;
 	uint8_t      led_id;
@@ -664,9 +667,10 @@ struct bnxt {
 #define BNXT_FW_CAP_IF_CHANGE		BIT(1)
 #define BNXT_FW_CAP_ERROR_RECOVERY	BIT(2)
 #define BNXT_FW_CAP_ERR_RECOVER_RELOAD	BIT(3)
+#define BNXT_FW_CAP_HCOMM_FW_STATUS	BIT(4)
 #define BNXT_FW_CAP_ADV_FLOW_MGMT	BIT(5)
 #define BNXT_FW_CAP_ADV_FLOW_COUNTERS	BIT(6)
-#define BNXT_FW_CAP_HCOMM_FW_STATUS	BIT(7)
+#define BNXT_FW_CAP_LINK_ADMIN		BIT(7)
 
 	pthread_mutex_t         flow_lock;
 
diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index a3a7e6ab7..54923948f 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -239,7 +239,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
 		goto reject;
 	}
 
-	if (bnxt_rcv_msg_from_vf(bp, vf_id, fwd_cmd) == true) {
+	if (bnxt_rcv_msg_from_vf(bp, vf_id, fwd_cmd)) {
 		/*
 		 * In older firmware versions, the MAC had to be all zeros for
 		 * the VF to set it's MAC via hwrm_func_vf_cfg. Set to all
@@ -254,6 +254,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
 				(const uint8_t *)"\x00\x00\x00\x00\x00");
 			}
 		}
+
 		if (fwd_cmd->req_type == HWRM_CFA_L2_SET_RX_MASK) {
 			struct hwrm_cfa_l2_set_rx_mask_input *srm =
 							(void *)fwd_cmd;
@@ -265,6 +266,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
 			    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN |
 			    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN);
 		}
+
 		/* Forward */
 		rc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
 		if (rc) {
@@ -306,7 +308,7 @@ int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp)
 		bnxt_handle_async_event(bp, cmp);
 		evt = 1;
 		break;
-	case CMPL_BASE_TYPE_HWRM_FWD_RESP:
+	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
 		/* Handle HWRM forwarded responses */
 		bnxt_handle_fwd_req(bp, cmp);
 		evt = 1;
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 8b63134c3..b4654ec6a 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -5208,37 +5208,14 @@ static void bnxt_config_vf_req_fwd(struct bnxt *bp)
 	if (!BNXT_PF(bp))
 		return;
 
-#define ALLOW_FUNC(x)	\
-	{ \
-		uint32_t arg = (x); \
-		bp->pf->vf_req_fwd[((arg) >> 5)] &= \
-		~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
-	}
-
-	/* Forward all requests if firmware is new enough */
-	if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
-	     (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
-	    ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
-		memset(bp->pf->vf_req_fwd, 0xff, sizeof(bp->pf->vf_req_fwd));
-	} else {
-		PMD_DRV_LOG(WARNING,
-			    "Firmware too old for VF mailbox functionality\n");
-		memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));
-	}
+	memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));
 
-	/*
-	 * The following are used for driver cleanup. If we disallow these,
-	 * VF drivers can't clean up cleanly.
-	 */
-	ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
-	ALLOW_FUNC(HWRM_VNIC_FREE);
-	ALLOW_FUNC(HWRM_RING_FREE);
-	ALLOW_FUNC(HWRM_RING_GRP_FREE);
-	ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
-	ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
-	ALLOW_FUNC(HWRM_STAT_CTX_FREE);
-	ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
-	ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
+	if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN))
+		BNXT_HWRM_CMD_TO_FORWARD(HWRM_PORT_PHY_QCFG);
+	BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_CFG);
+	BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_VF_CFG);
+	BNXT_HWRM_CMD_TO_FORWARD(HWRM_CFA_L2_FILTER_ALLOC);
+	BNXT_HWRM_CMD_TO_FORWARD(HWRM_OEM_CMD);
 }
 
 uint16_t
@@ -6189,7 +6166,10 @@ bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev)
 
 	bnxt_free_int(bp);
 	bnxt_free_mem(bp, reconfig_dev);
+
 	bnxt_hwrm_func_buf_unrgtr(bp);
+	rte_free(bp->pf->vf_req_buf);
+
 	rc = bnxt_hwrm_func_driver_unregister(bp, 0);
 	bp->flags &= ~BNXT_FLAG_REGISTERED;
 	bnxt_free_ctx_mem(bp);
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index faeaf4b5d..8133afc74 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -765,7 +765,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
 	bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
 	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
-	if (!BNXT_CHIP_THOR(bp))
+	if (!BNXT_CHIP_THOR(bp) && !bp->pdev->max_vfs)
 		bp->max_l2_ctx += bp->max_rx_em_flows;
 	/* TODO: For now, do not support VMDq/RFS on VFs. */
 	if (BNXT_PF(bp)) {
@@ -803,6 +803,9 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
 		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
 
+	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
+		bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
+
 	HWRM_UNLOCK();
 
 	return rc;
@@ -818,16 +821,15 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 		if (rc)
 			return rc;
 
+		/* On older FW,
+		 * bnxt_hwrm_func_resc_qcaps can fail and cause init failure.
+		 * But the error can be ignored. Return success.
+		 */
 		rc = bnxt_hwrm_func_resc_qcaps(bp);
 		if (!rc)
 			bp->flags |= BNXT_FLAG_NEW_RM;
 	}
 
-	/* On older FW,
-	 * bnxt_hwrm_func_resc_qcaps can fail and cause init failure.
-	 * But the error can be ignored. Return success.
-	 */
-
 	return 0;
 }
 
@@ -916,14 +918,6 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 		memcpy(req.vf_req_fwd, bp->pf->vf_req_fwd,
 		       RTE_MIN(sizeof(req.vf_req_fwd),
 			       sizeof(bp->pf->vf_req_fwd)));
-
-		/*
-		 * PF can sniff HWRM API issued by VF. This can be set up by
-		 * linux driver and inherited by the DPDK PF driver. Clear
-		 * this HWRM sniffer list in FW because DPDK PF driver does
-		 * not support this.
-		 */
-		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE;
 	}
 
 	req.flags = rte_cpu_to_le_32(flags);
@@ -1052,21 +1046,19 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
 
 	HWRM_CHECK_RESULT_SILENT();
 
-	if (BNXT_VF(bp)) {
-		bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
-		bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
-		bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
-		bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
-		bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
-		/* func_resource_qcaps does not return max_rx_em_flows.
-		 * So use the value provided by func_qcaps.
-		 */
-		bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
-		if (!BNXT_CHIP_THOR(bp))
-			bp->max_l2_ctx += bp->max_rx_em_flows;
-		bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
-		bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
-	}
+	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
+	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
+	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
+	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
+	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
+	/* func_resource_qcaps does not return max_rx_em_flows.
+	 * So use the value provided by func_qcaps.
+	 */
+	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+	if (!BNXT_CHIP_THOR(bp) && !bp->pdev->max_vfs)
+		bp->max_l2_ctx += bp->max_rx_em_flows;
+	bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
+	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
 	bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
 	bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
 	if (bp->vf_resv_strategy >
@@ -3300,33 +3292,8 @@ int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp)
 	return 0;
 }
 
-static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
-				   struct hwrm_func_qcaps_output *qcaps)
-{
-	qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
-	memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
-	       sizeof(qcaps->mac_address));
-	qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
-	qcaps->max_rx_rings = fcfg->num_rx_rings;
-	qcaps->max_tx_rings = fcfg->num_tx_rings;
-	qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
-	qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
-	qcaps->max_vfs = 0;
-	qcaps->first_vf_id = 0;
-	qcaps->max_vnics = fcfg->num_vnics;
-	qcaps->max_decap_records = 0;
-	qcaps->max_encap_records = 0;
-	qcaps->max_tx_wm_flows = 0;
-	qcaps->max_tx_em_flows = 0;
-	qcaps->max_rx_wm_flows = 0;
-	qcaps->max_rx_em_flows = 0;
-	qcaps->max_flow_id = 0;
-	qcaps->max_mcast_filters = fcfg->num_mcast_filters;
-	qcaps->max_sp_tx_rings = 0;
-	qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
-}
-
-static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
+static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp,
+				 struct bnxt_pf_resource_info *pf_resc)
 {
 	struct hwrm_func_cfg_input req = {0};
 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
@@ -3345,7 +3312,8 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
 
 	if (BNXT_HAS_RING_GRPS(bp)) {
 		enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
-		req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
+		req.num_hw_ring_grps =
+			rte_cpu_to_le_16(pf_resc->num_hw_ring_grps);
 	} else if (BNXT_HAS_NQ(bp)) {
 		enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
 		req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
@@ -3354,12 +3322,12 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
 	req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
 	req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
 	req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
-	req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
-	req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
-	req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
-	req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
-	req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
-	req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
+	req.num_rsscos_ctxs = rte_cpu_to_le_16(pf_resc->num_rsscos_ctxs);
+	req.num_stat_ctxs = rte_cpu_to_le_16(pf_resc->num_stat_ctxs);
+	req.num_cmpl_rings = rte_cpu_to_le_16(pf_resc->num_cp_rings);
+	req.num_tx_rings = rte_cpu_to_le_16(pf_resc->num_tx_rings);
+	req.num_rx_rings = rte_cpu_to_le_16(pf_resc->num_rx_rings);
+	req.num_l2_ctxs = rte_cpu_to_le_16(pf_resc->num_l2_ctxs);
 	req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
 	req.fid = rte_cpu_to_le_16(0xffff);
 	req.enables = rte_cpu_to_le_32(enables);
@@ -3374,9 +3342,43 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
 	return rc;
 }
 
-static void populate_vf_func_cfg_req(struct bnxt *bp,
-				     struct hwrm_func_cfg_input *req,
-				     int num_vfs)
+/* min values are the guaranteed resources and max values are subject
+ * to availability. The strategy for now is to keep both min & max
+ * values the same.
+ */
+static void
+bnxt_fill_vf_func_cfg_req_new(struct bnxt *bp,
+			      struct hwrm_func_vf_resource_cfg_input *req,
+			      int num_vfs)
+{
+	req->max_rsscos_ctx = rte_cpu_to_le_16(bp->max_rsscos_ctx /
+					       (num_vfs + 1));
+	req->min_rsscos_ctx = req->max_rsscos_ctx;
+	req->max_stat_ctx = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
+	req->min_stat_ctx = req->max_stat_ctx;
+	req->max_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
+					       (num_vfs + 1));
+	req->min_cmpl_rings = req->max_cmpl_rings;
+	req->max_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
+	req->min_tx_rings = req->max_tx_rings;
+	req->max_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
+	req->min_rx_rings = req->max_rx_rings;
+	req->max_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
+	req->min_l2_ctxs = req->max_l2_ctxs;
+	/* TODO: For now, do not support VMDq/RFS on VFs. */
+	req->max_vnics = rte_cpu_to_le_16(1);
+	req->min_vnics = req->max_vnics;
+	req->max_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
+						 (num_vfs + 1));
+	req->min_hw_ring_grps = req->max_hw_ring_grps;
+	req->flags =
+	 rte_cpu_to_le_16(HWRM_FUNC_VF_RESOURCE_CFG_INPUT_FLAGS_MIN_GUARANTEED);
+}
+
+static void
+bnxt_fill_vf_func_cfg_req_old(struct bnxt *bp,
+			      struct hwrm_func_cfg_input *req,
+			      int num_vfs)
 {
 	req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
 			HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
@@ -3407,60 +3409,29 @@ static void populate_vf_func_cfg_req(struct bnxt *bp,
 						 (num_vfs + 1));
 }
 
-static void add_random_mac_if_needed(struct bnxt *bp,
-				     struct hwrm_func_cfg_input *cfg_req,
-				     int vf)
-{
-	struct rte_ether_addr mac;
-
-	if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
-		return;
-
-	if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00", 6) == 0) {
-		cfg_req->enables |=
-		rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
-		rte_eth_random_addr(cfg_req->dflt_mac_addr);
-		bp->pf->vf_info[vf].random_mac = true;
-	} else {
-		memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,
-			RTE_ETHER_ADDR_LEN);
-	}
-}
-
-static int reserve_resources_from_vf(struct bnxt *bp,
-				     struct hwrm_func_cfg_input *cfg_req,
+/* Update the port wide resource values based on how many resources
+ * got allocated to the VF.
+ */
+static int bnxt_update_max_resources(struct bnxt *bp,
 				     int vf)
 {
-	struct hwrm_func_qcaps_input req = {0};
-	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_qcfg_input req = {0};
+	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 	int rc;
 
 	/* Get the actual allocated values now */
-	HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
 	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+	HWRM_CHECK_RESULT();
 
-	if (rc) {
-		PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
-		copy_func_cfg_to_qcaps(cfg_req, resp);
-	} else if (resp->error_code) {
-		rc = rte_le_to_cpu_16(resp->error_code);
-		PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
-		copy_func_cfg_to_qcaps(cfg_req, resp);
-	}
-
-	bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
-	bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
-	bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
-	bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
-	bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
-	bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
-	/*
-	 * TODO: While not supporting VMDq with VFs, max_vnics is always
-	 * forced to 1 in this case
-	 */
-	//bp->max_vnics -= rte_le_to_cpu_16(esp->max_vnics);
-	bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
+	bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
+	bp->max_stat_ctx -= rte_le_to_cpu_16(resp->alloc_stat_ctx);
+	bp->max_cp_rings -= rte_le_to_cpu_16(resp->alloc_cmpl_rings);
+	bp->max_tx_rings -= rte_le_to_cpu_16(resp->alloc_tx_rings);
+	bp->max_rx_rings -= rte_le_to_cpu_16(resp->alloc_rx_rings);
+	bp->max_l2_ctx -= rte_le_to_cpu_16(resp->alloc_l2_ctx);
+	bp->max_ring_grps -= rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
 
 	HWRM_UNLOCK();
 
@@ -3485,7 +3456,8 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
 	return rc;
 }
 
-static int update_pf_resource_max(struct bnxt *bp)
+static int bnxt_query_pf_resources(struct bnxt *bp,
+				   struct bnxt_pf_resource_info *pf_resc)
 {
 	struct hwrm_func_qcfg_input req = {0};
 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
@@ -3497,8 +3469,13 @@ static int update_pf_resource_max(struct bnxt *bp)
 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 	HWRM_CHECK_RESULT();
 
-	/* Only TX ring value reflects actual allocation? TODO */
-	bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
+	pf_resc->num_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
+	pf_resc->num_rsscos_ctxs = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
+	pf_resc->num_stat_ctxs = rte_le_to_cpu_16(resp->alloc_stat_ctx);
+	pf_resc->num_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
+	pf_resc->num_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
+	pf_resc->num_l2_ctxs = rte_le_to_cpu_16(resp->alloc_l2_ctx);
+	pf_resc->num_hw_ring_grps = rte_le_to_cpu_32(resp->alloc_hw_ring_grps);
 	bp->pf->evb_mode = resp->evb_mode;
 
 	HWRM_UNLOCK();
@@ -3506,8 +3483,42 @@ static int update_pf_resource_max(struct bnxt *bp)
 	return rc;
 }
 
+static void
+bnxt_calculate_pf_resources(struct bnxt *bp,
+			    struct bnxt_pf_resource_info *pf_resc,
+			    int num_vfs)
+{
+	if (!num_vfs) {
+		pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx;
+		pf_resc->num_stat_ctxs = bp->max_stat_ctx;
+		pf_resc->num_cp_rings = bp->max_cp_rings;
+		pf_resc->num_tx_rings = bp->max_tx_rings;
+		pf_resc->num_rx_rings = bp->max_rx_rings;
+		pf_resc->num_l2_ctxs = bp->max_l2_ctx;
+		pf_resc->num_hw_ring_grps = bp->max_ring_grps;
+
+		return;
+	}
+
+	pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx / (num_vfs + 1) +
+				   bp->max_rsscos_ctx % (num_vfs + 1);
+	pf_resc->num_stat_ctxs = bp->max_stat_ctx / (num_vfs + 1) +
+				 bp->max_stat_ctx % (num_vfs + 1);
+	pf_resc->num_cp_rings = bp->max_cp_rings / (num_vfs + 1) +
+				bp->max_cp_rings % (num_vfs + 1);
+	pf_resc->num_tx_rings = bp->max_tx_rings / (num_vfs + 1) +
+				bp->max_tx_rings % (num_vfs + 1);
+	pf_resc->num_rx_rings = bp->max_rx_rings / (num_vfs + 1) +
+				bp->max_rx_rings % (num_vfs + 1);
+	pf_resc->num_l2_ctxs = bp->max_l2_ctx / (num_vfs + 1) +
+			       bp->max_l2_ctx % (num_vfs + 1);
+	pf_resc->num_hw_ring_grps = bp->max_ring_grps / (num_vfs + 1) +
+				    bp->max_ring_grps % (num_vfs + 1);
+}
+
 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
 {
+	struct bnxt_pf_resource_info pf_resc = { 0 };
 	int rc;
 
 	if (!BNXT_PF(bp)) {
@@ -3519,82 +3530,100 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
 	if (rc)
 		return rc;
 
+	bnxt_calculate_pf_resources(bp, &pf_resc, 0);
+
 	bp->pf->func_cfg_flags &=
 		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
 		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
 	bp->pf->func_cfg_flags |=
 		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
-	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
+	rc = bnxt_hwrm_pf_func_cfg(bp, &pf_resc);
 	rc = __bnxt_hwrm_func_qcaps(bp);
 	return rc;
 }
 
-int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
+static int
+bnxt_configure_vf_req_buf(struct bnxt *bp, int num_vfs)
 {
-	struct hwrm_func_cfg_input req = {0};
-	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
-	int i;
-	size_t sz;
-	int rc = 0;
-	size_t req_buf_sz;
-
-	if (!BNXT_PF(bp)) {
-		PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
-		return -EINVAL;
-	}
-
-	rc = bnxt_hwrm_func_qcaps(bp);
-
-	if (rc)
-		return rc;
-
-	bp->pf->active_vfs = num_vfs;
-
-	/*
-	 * First, configure the PF to only use one TX ring.  This ensures that
-	 * there are enough rings for all VFs.
-	 *
-	 * If we don't do this, when we call func_alloc() later, we will lock
-	 * extra rings to the PF that won't be available during func_cfg() of
-	 * the VFs.
-	 *
-	 * This has been fixed with firmware versions above 20.6.54
-	 */
-	bp->pf->func_cfg_flags &=
-		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
-		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
-	bp->pf->func_cfg_flags |=
-		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
-	rc = bnxt_hwrm_pf_func_cfg(bp, 1);
-	if (rc)
-		return rc;
+	size_t req_buf_sz, sz;
+	int i, rc;
 
-	/*
-	 * Now, create and register a buffer to hold forwarded VF requests
-	 */
 	req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
 	bp->pf->vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
 		page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
 	if (bp->pf->vf_req_buf == NULL) {
-		rc = -ENOMEM;
-		goto error_free;
+		return -ENOMEM;
 	}
+
 	for (sz = 0; sz < req_buf_sz; sz += getpagesize())
 		rte_mem_lock_page(((char *)bp->pf->vf_req_buf) + sz);
+
 	for (i = 0; i < num_vfs; i++)
 		bp->pf->vf_info[i].req_buf = ((char *)bp->pf->vf_req_buf) +
-					(i * HWRM_MAX_REQ_LEN);
+					     (i * HWRM_MAX_REQ_LEN);
 
-	rc = bnxt_hwrm_func_buf_rgtr(bp);
+	rc = bnxt_hwrm_func_buf_rgtr(bp, num_vfs);
 	if (rc)
-		goto error_free;
+		rte_free(bp->pf->vf_req_buf);
+
+	return rc;
+}
 
-	populate_vf_func_cfg_req(bp, &req, num_vfs);
+static int
+bnxt_process_vf_resc_config_new(struct bnxt *bp, int num_vfs)
+{
+	struct hwrm_func_vf_resource_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_vf_resource_cfg_input req = {0};
+	int i, rc = 0;
 
+	bnxt_fill_vf_func_cfg_req_new(bp, &req, num_vfs);
 	bp->pf->active_vfs = 0;
 	for (i = 0; i < num_vfs; i++) {
-		add_random_mac_if_needed(bp, &req, i);
+		HWRM_PREP(&req, HWRM_FUNC_VF_RESOURCE_CFG, BNXT_USE_CHIMP_MB);
+		req.vf_id = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
+		rc = bnxt_hwrm_send_message(bp,
+					    &req,
+					    sizeof(req),
+					    BNXT_USE_CHIMP_MB);
+		if (rc || resp->error_code) {
+			PMD_DRV_LOG(ERR,
+				"Failed to initialize VF %d\n", i);
+			PMD_DRV_LOG(ERR,
+				"Not all VFs available. (%d, %d)\n",
+				rc, resp->error_code);
+			HWRM_UNLOCK();
+
+			/* If the first VF configuration itself fails,
+			 * unregister the vf_fwd_request buffer.
+			 */
+			if (i == 0)
+				bnxt_hwrm_func_buf_unrgtr(bp);
+			break;
+		}
+		HWRM_UNLOCK();
+
+		/* Update the max resource values based on the resource values
+		 * allocated to the VF.
+		 */
+		bnxt_update_max_resources(bp, i);
+		bp->pf->active_vfs++;
+		bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
+	}
+
+	return 0;
+}
+
+static int
+bnxt_process_vf_resc_config_old(struct bnxt *bp, int num_vfs)
+{
+	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_cfg_input req = {0};
+	int i, rc;
 
+	bnxt_fill_vf_func_cfg_req_old(bp, &req, num_vfs);
+
+	bp->pf->active_vfs = 0;
+	for (i = 0; i < num_vfs; i++) {
 		HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 		req.flags = rte_cpu_to_le_32(bp->pf->vf_info[i].func_cfg_flags);
 		req.fid = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
@@ -3609,40 +3638,107 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 
 		if (rc || resp->error_code) {
 			PMD_DRV_LOG(ERR,
-				"Failed to initizlie VF %d\n", i);
+				"Failed to initialize VF %d\n", i);
 			PMD_DRV_LOG(ERR,
 				"Not all VFs available. (%d, %d)\n",
 				rc, resp->error_code);
 			HWRM_UNLOCK();
+
+			/* If the first VF configuration itself fails,
+			 * unregister the vf_fwd_request buffer.
+			 */
+			if (i == 0)
+				bnxt_hwrm_func_buf_unrgtr(bp);
 			break;
 		}
 
 		HWRM_UNLOCK();
 
-		reserve_resources_from_vf(bp, &req, i);
+		/* Update the max resource values based on the resource values
+		 * allocated to the VF.
+		 */
+		bnxt_update_max_resources(bp, i);
 		bp->pf->active_vfs++;
 		bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
 	}
 
+	return 0;
+}
+
+static void
+bnxt_configure_vf_resources(struct bnxt *bp, int num_vfs)
+{
+	if (bp->flags & BNXT_FLAG_NEW_RM)
+		bnxt_process_vf_resc_config_new(bp, num_vfs);
+	else
+		bnxt_process_vf_resc_config_old(bp, num_vfs);
+}
+
+static void
+bnxt_update_pf_resources(struct bnxt *bp,
+			 struct bnxt_pf_resource_info *pf_resc)
+{
+	bp->max_rsscos_ctx = pf_resc->num_rsscos_ctxs;
+	bp->max_stat_ctx = pf_resc->num_stat_ctxs;
+	bp->max_cp_rings = pf_resc->num_cp_rings;
+	bp->max_tx_rings = pf_resc->num_tx_rings;
+	bp->max_rx_rings = pf_resc->num_rx_rings;
+	bp->max_ring_grps = pf_resc->num_hw_ring_grps;
+}
+
+static int32_t
+bnxt_configure_pf_resources(struct bnxt *bp,
+			    struct bnxt_pf_resource_info *pf_resc)
+{
 	/*
-	 * Now configure the PF to use "the rest" of the resources
-	 * We're using STD_TX_RING_MODE here though which will limit the TX
-	 * rings.  This will allow QoS to function properly.  Not setting this
+	 * We're using STD_TX_RING_MODE here which will limit the TX
+	 * rings. This will allow QoS to function properly. Not setting this
 	 * will cause PF rings to break bandwidth settings.
 	 */
-	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
+	bp->pf->func_cfg_flags &=
+		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
+		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
+	bp->pf->func_cfg_flags |=
+		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
+	return bnxt_hwrm_pf_func_cfg(bp, pf_resc);
+}
+
+int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
+{
+	struct bnxt_pf_resource_info pf_resc = { 0 };
+	int rc;
+
+	if (!BNXT_PF(bp)) {
+		PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
+		return -EINVAL;
+	}
+
+	rc = bnxt_hwrm_func_qcaps(bp);
 	if (rc)
-		goto error_free;
+		return rc;
+
+	bnxt_calculate_pf_resources(bp, &pf_resc, num_vfs);
 
-	rc = update_pf_resource_max(bp);
+	rc = bnxt_configure_pf_resources(bp, &pf_resc);
 	if (rc)
-		goto error_free;
+		return rc;
 
-	return rc;
+	rc = bnxt_query_pf_resources(bp, &pf_resc);
+	if (rc)
+		return rc;
 
-error_free:
-	bnxt_hwrm_func_buf_unrgtr(bp);
-	return rc;
+	/*
+	 * Now, create and register a buffer to hold forwarded VF requests
+	 */
+	rc = bnxt_configure_vf_req_buf(bp, num_vfs);
+	if (rc)
+		return rc;
+
+	bnxt_configure_vf_resources(bp, num_vfs);
+
+	bnxt_update_pf_resources(bp, &pf_resc);
+
+	return 0;
 }
 
 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
@@ -3747,23 +3843,24 @@ int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
 }
 
-int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
+int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp, int num_vfs)
 {
-	int rc = 0;
-	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
 	struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
+	int rc;
 
 	HWRM_PREP(&req, HWRM_FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
 
 	req.req_buf_num_pages = rte_cpu_to_le_16(1);
-	req.req_buf_page_size = rte_cpu_to_le_16(
-			 page_getenum(bp->pf->active_vfs * HWRM_MAX_REQ_LEN));
+	req.req_buf_page_size =
+		rte_cpu_to_le_16(page_getenum(num_vfs * HWRM_MAX_REQ_LEN));
 	req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
 	req.req_buf_page_addr0 =
 		rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf->vf_req_buf));
 	if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
 		PMD_DRV_LOG(ERR,
 			"unable to map buffer address to physical memory\n");
+		HWRM_UNLOCK();
 		return -ENOMEM;
 	}
 
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index e98b1fe41..a7fa7f66b 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -107,6 +107,16 @@ enum bnxt_flow_dir {
 	BNXT_DIR_MAX
 };
 
+struct bnxt_pf_resource_info {
+	uint16_t num_rsscos_ctxs;
+	uint16_t num_stat_ctxs;
+	uint16_t num_tx_rings;
+	uint16_t num_rx_rings;
+	uint16_t num_cp_rings;
+	uint16_t num_l2_ctxs;
+	uint32_t num_hw_ring_grps;
+};
+
 #define BNXT_CTX_VAL_INVAL	0xFFFF
 
 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp,
@@ -127,7 +137,7 @@ int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
 			      void *encaped, size_t ec_size);
 
-int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp);
+int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp, int num_vfs);
 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp);
 int bnxt_hwrm_func_driver_register(struct bnxt *bp);
 int bnxt_hwrm_func_qcaps(struct bnxt *bp);
-- 
2.21.1 (Apple Git-122.3)


^ permalink raw reply	[flat|nested] 10+ messages in thread

* [dpdk-stable] [PATCH v2 07/12] net/bnxt: handle default vnic change async event
       [not found] ` <20201010041153.63921-1-ajit.khaparde@broadcom.com>
  2020-10-10  4:11   ` [dpdk-stable] [PATCH v2 01/12] net/bnxt: fix the corruption of the session details Ajit Khaparde
  2020-10-10  4:11   ` [dpdk-stable] [PATCH v2 04/12] net/bnxt: fix PMD PF support in SR-IOV mode Ajit Khaparde
@ 2020-10-10  4:11   ` Ajit Khaparde
  2 siblings, 0 replies; 10+ messages in thread
From: Ajit Khaparde @ 2020-10-10  4:11 UTC (permalink / raw)
  To: dev; +Cc: Venkat Duvvuru, stable, Somnath Kotur

From: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>

Currently, we register for this event only if the function
is a trusted VF. This patch extends the registration to PFs as well.
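
A condensed sketch of the handler flow after this change (identifiers
taken from the diff below; simplified):

	/* The parent-fid validation is dropped; the handler now only
	 * needs representor info and an allocated default VNIC. */
	if (vnic_state != BNXT_DEFAULT_VNIC_ALLOC)
		return;
	if (!bp->rep_info)
		return;
	vf_fid = (event_data & BNXT_DEFAULT_VNIC_CHANGE_VF_ID_MASK) >>
			BNXT_DEFAULT_VNIC_CHANGE_VF_ID_SFT;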

Fixes: 322bd6e70272 ("net/bnxt: add port representor infrastructure")
Cc: stable@dpdk.org

Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
 drivers/net/bnxt/bnxt_cpr.c  | 7 ++-----
 drivers/net/bnxt/bnxt_hwrm.c | 2 +-
 2 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index 54923948f..91d1ffe46 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -50,7 +50,7 @@ static void
 bnxt_process_default_vnic_change(struct bnxt *bp,
 				 struct hwrm_async_event_cmpl *async_cmp)
 {
-	uint16_t fid, vnic_state, parent_id, vf_fid, vf_id;
+	uint16_t vnic_state, vf_fid, vf_id;
 	struct bnxt_representor *vf_rep_bp;
 	struct rte_eth_dev *eth_dev;
 	bool vfr_found = false;
@@ -67,10 +67,7 @@ bnxt_process_default_vnic_change(struct bnxt *bp,
 	if (vnic_state != BNXT_DEFAULT_VNIC_ALLOC)
 		return;
 
-	parent_id = (event_data & BNXT_DEFAULT_VNIC_CHANGE_PF_ID_MASK) >>
-			BNXT_DEFAULT_VNIC_CHANGE_PF_ID_SFT;
-	fid = BNXT_PF(bp) ? bp->fw_fid : bp->parent->fid;
-	if (parent_id != fid || !bp->rep_info)
+	if (!bp->rep_info)
 		return;
 
 	vf_fid = (event_data & BNXT_DEFAULT_VNIC_CHANGE_VF_ID_MASK) >>
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 8133afc74..eef282b69 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -938,7 +938,7 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 		req.async_event_fwd[1] |=
 			rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION);
 
-	if (BNXT_VF_IS_TRUSTED(bp))
+	if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))
 		req.async_event_fwd[1] |=
 		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE);
 
-- 
2.21.1 (Apple Git-122.3)


^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2020-10-10  4:13 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <20201009111130.10422-1-somnath.kotur@broadcom.com>
2020-10-09 11:11 ` [dpdk-stable] [PATCH 01/13] net/bnxt: fix the corruption of the session details Somnath Kotur
2020-10-09 11:11 ` [dpdk-stable] [PATCH 04/13] net/bnxt: fixes for PMD PF support in SR-IOV mode Somnath Kotur
2020-10-09 11:11 ` [dpdk-stable] [PATCH 07/13] net/bnxt: register PF for default vnic change async event Somnath Kotur
2020-10-09 11:11 ` [dpdk-stable] [PATCH 13/13] net/bnxt: remove parent fid validation in vnic change event processing Somnath Kotur
2020-10-10  4:05 ` [dpdk-stable] [PATCH v2 01/12] net/bnxt: fix the corruption of the session details Ajit Khaparde
2020-10-10  4:05 ` [dpdk-stable] [PATCH v2 04/12] net/bnxt: fix PMD PF support in SR-IOV mode Ajit Khaparde
2020-10-10  4:05 ` [dpdk-stable] [PATCH v2 07/12] net/bnxt: handle default vnic change async event Ajit Khaparde
     [not found] ` <20201010041153.63921-1-ajit.khaparde@broadcom.com>
2020-10-10  4:11   ` [dpdk-stable] [PATCH v2 01/12] net/bnxt: fix the corruption of the session details Ajit Khaparde
2020-10-10  4:11   ` [dpdk-stable] [PATCH v2 04/12] net/bnxt: fix PMD PF support in SR-IOV mode Ajit Khaparde
2020-10-10  4:11   ` [dpdk-stable] [PATCH v2 07/12] net/bnxt: handle default vnic change async event Ajit Khaparde

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).