From: Kalesh A P <kalesh-anakkur.purayil@broadcom.com>
To: dev@dpdk.org
Cc: ferruh.yigit@intel.com, ajit.khaparde@broadcom.com
Subject: [dpdk-dev] [PATCH 4/4] net/bnxt: fix VF resource allocation strategy
Date: Thu, 20 Jan 2022 14:42:28 +0530
Message-ID: <20220120091228.7076-5-kalesh-anakkur.purayil@broadcom.com>
In-Reply-To: <20220120091228.7076-1-kalesh-anakkur.purayil@broadcom.com>

From: Ajit Khaparde <ajit.khaparde@broadcom.com>

1. VFs need a notification queue to handle async messages, but the
current logic does not reserve one, leading to initialization
failure in some cases.
2. With the current logic, the DPDK PF driver reserves only one VNIC
for the VFs, leading to initialization failure when more than one
Rx queue is configured.

Added logic to distribute the number of NQs and VNICs in the pool
across the VFs and the PF.
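
For reference, the split is plain integer arithmetic over the
(num_vfs + 1) functions, with the PF absorbing the remainder (a
minimal sketch of the arithmetic only; the helper names here are
hypothetical, not driver code):

	static uint16_t vf_share(uint16_t pool, uint16_t num_vfs)
	{
		/* Each function gets an equal slice of the pool. */
		return pool / (num_vfs + 1);
	}

	static uint16_t pf_share(uint16_t pool, uint16_t num_vfs)
	{
		/* The PF gets one slice plus whatever is left over. */
		return pool / (num_vfs + 1) + pool % (num_vfs + 1);
	}

For example, with a pool of 62 NQs and 3 VFs, each VF gets 15 and
the PF gets 15 + 2 = 17.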

While reserving resources for the VFs, the strategy is to keep both
the min & max values the same. This could result in a failure when
there are not enough resources to satisfy the request. Hence, fix it
to instruct the FW not to hard-reserve all of the minimum resources
requested for the VF. The VF driver can query the FW for its actual
resource allocation during probe.
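
Concretely, the per-VF resource request keeps min == max for each
resource type but no longer sets the MIN_GUARANTEED flag, so the FW
may grant fewer than the requested minimums instead of failing the
whole request (a sketch mirroring the hunk below, shown for VNICs):

	req->max_vnics = rte_cpu_to_le_16(bp->max_vnics / (num_vfs + 1));
	req->min_vnics = req->max_vnics;	/* min == max */
	/* HWRM_FUNC_VF_RESOURCE_CFG_INPUT_FLAGS_MIN_GUARANTEED is not
	 * set, so the FW does not hard-reserve the minimums up front;
	 * the VF driver queries its actual allocation at probe time.
	 */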

Fixes: b7778e8a1c00 ("net/bnxt: refactor to properly allocate resources for PF/VF")
Cc: stable@dpdk.org

Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
---
 drivers/net/bnxt/bnxt_hwrm.c | 32 +++++++++++++++++---------------
 drivers/net/bnxt/bnxt_hwrm.h |  2 ++
 2 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 5418fa1..b4aeec5 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -902,15 +902,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
 	if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
 		bp->max_l2_ctx += bp->max_rx_em_flows;
-	/* TODO: For now, do not support VMDq/RFS on VFs. */
-	if (BNXT_PF(bp)) {
-		if (bp->pf->max_vfs)
-			bp->max_vnics = 1;
-		else
-			bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
-	} else {
-		bp->max_vnics = 1;
-	}
+	bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
 	PMD_DRV_LOG(DEBUG, "Max l2_cntxts is %d vnics is %d\n",
 		    bp->max_l2_ctx, bp->max_vnics);
 	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
@@ -3495,7 +3487,7 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp,
 			rte_cpu_to_le_16(pf_resc->num_hw_ring_grps);
 	} else if (BNXT_HAS_NQ(bp)) {
 		enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
-		req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
+		req.num_msix = rte_cpu_to_le_16(pf_resc->num_nq_rings);
 	}
 
 	req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
@@ -3508,7 +3500,7 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp,
 	req.num_tx_rings = rte_cpu_to_le_16(pf_resc->num_tx_rings);
 	req.num_rx_rings = rte_cpu_to_le_16(pf_resc->num_rx_rings);
 	req.num_l2_ctxs = rte_cpu_to_le_16(pf_resc->num_l2_ctxs);
-	req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
+	req.num_vnics = rte_cpu_to_le_16(pf_resc->num_vnics);
 	req.fid = rte_cpu_to_le_16(0xffff);
 	req.enables = rte_cpu_to_le_32(enables);
 
@@ -3545,14 +3537,12 @@ bnxt_fill_vf_func_cfg_req_new(struct bnxt *bp,
 	req->min_rx_rings = req->max_rx_rings;
 	req->max_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
 	req->min_l2_ctxs = req->max_l2_ctxs;
-	/* TODO: For now, do not support VMDq/RFS on VFs. */
-	req->max_vnics = rte_cpu_to_le_16(1);
+	req->max_vnics = rte_cpu_to_le_16(bp->max_vnics / (num_vfs + 1));
 	req->min_vnics = req->max_vnics;
 	req->max_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
 						 (num_vfs + 1));
 	req->min_hw_ring_grps = req->max_hw_ring_grps;
-	req->flags =
-	 rte_cpu_to_le_16(HWRM_FUNC_VF_RESOURCE_CFG_INPUT_FLAGS_MIN_GUARANTEED);
+	req->max_msix = rte_cpu_to_le_16(bp->max_nq_rings / (num_vfs + 1));
 }
 
 static void
@@ -3612,6 +3602,8 @@ static int bnxt_update_max_resources(struct bnxt *bp,
 	bp->max_rx_rings -= rte_le_to_cpu_16(resp->alloc_rx_rings);
 	bp->max_l2_ctx -= rte_le_to_cpu_16(resp->alloc_l2_ctx);
 	bp->max_ring_grps -= rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
+	bp->max_nq_rings -= rte_le_to_cpu_16(resp->alloc_msix);
+	bp->max_vnics -= rte_le_to_cpu_16(resp->alloc_vnics);
 
 	HWRM_UNLOCK();
 
@@ -3685,6 +3677,8 @@ static int bnxt_query_pf_resources(struct bnxt *bp,
 	pf_resc->num_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
 	pf_resc->num_l2_ctxs = rte_le_to_cpu_16(resp->alloc_l2_ctx);
 	pf_resc->num_hw_ring_grps = rte_le_to_cpu_32(resp->alloc_hw_ring_grps);
+	pf_resc->num_nq_rings = rte_le_to_cpu_32(resp->alloc_msix);
+	pf_resc->num_vnics = rte_le_to_cpu_16(resp->alloc_vnics);
 	bp->pf->evb_mode = resp->evb_mode;
 
 	HWRM_UNLOCK();
@@ -3705,6 +3699,8 @@ bnxt_calculate_pf_resources(struct bnxt *bp,
 		pf_resc->num_rx_rings = bp->max_rx_rings;
 		pf_resc->num_l2_ctxs = bp->max_l2_ctx;
 		pf_resc->num_hw_ring_grps = bp->max_ring_grps;
+		pf_resc->num_nq_rings = bp->max_nq_rings;
+		pf_resc->num_vnics = bp->max_vnics;
 
 		return;
 	}
@@ -3723,6 +3719,10 @@ bnxt_calculate_pf_resources(struct bnxt *bp,
 			       bp->max_l2_ctx % (num_vfs + 1);
 	pf_resc->num_hw_ring_grps = bp->max_ring_grps / (num_vfs + 1) +
 				    bp->max_ring_grps % (num_vfs + 1);
+	pf_resc->num_nq_rings = bp->max_nq_rings / (num_vfs + 1) +
+				bp->max_nq_rings % (num_vfs + 1);
+	pf_resc->num_vnics = bp->max_vnics / (num_vfs + 1) +
+				bp->max_vnics % (num_vfs + 1);
 }
 
 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
@@ -3898,6 +3898,8 @@ bnxt_update_pf_resources(struct bnxt *bp,
 	bp->max_tx_rings = pf_resc->num_tx_rings;
 	bp->max_rx_rings = pf_resc->num_rx_rings;
 	bp->max_ring_grps = pf_resc->num_hw_ring_grps;
+	bp->max_nq_rings = pf_resc->num_nq_rings;
+	bp->max_vnics = pf_resc->num_vnics;
 }
 
 static int32_t
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 21e1b7a..63f8d8c 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -114,6 +114,8 @@ struct bnxt_pf_resource_info {
 	uint16_t num_rx_rings;
 	uint16_t num_cp_rings;
 	uint16_t num_l2_ctxs;
+	uint16_t num_nq_rings;
+	uint16_t num_vnics;
 	uint32_t num_hw_ring_grps;
 };
 
-- 
2.10.1


