DPDK patches and discussions
From: Shahed Shaikh <shshaikh@marvell.com>
To: <dev@dpdk.org>
Cc: <rmody@marvell.com>, <ferruh.yigit@intel.com>,
	<GR-Everest-DPDK-Dev@marvell.com>, <stable@dpdk.org>
Subject: [dpdk-dev] [PATCH 5/5] net/bnx2x: fix supported max Rx and Tx descriptor count
Date: Tue, 4 Jun 2019 11:53:52 -0700
Message-ID: <20190604185352.28629-5-shshaikh@marvell.com>
In-Reply-To: <20190604185352.28629-1-shshaikh@marvell.com>

The driver does not report a per-queue limit on the number of Rx and Tx
descriptors. As a result, an application may configure 64k descriptors
(the default filled in by rte_eth_dev_info_get()), which then causes
failures in the PMD and HW flows because such a count is not supported.

Fix this by advertising the supported Rx and Tx descriptor limits
through rx_desc_lim and tx_desc_lim in bnx2x_dev_infos_get().
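
For illustration only (not part of this change), an application can use
the limits reported after this fix to clamp its requested ring sizes; a
minimal sketch, where the port id, requested sizes and error handling
are assumptions:

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Illustrative only: clamp requested ring sizes to the PMD limits. */
static int
setup_queues(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_dev_info dev_info;
	uint16_t nb_rxd = 4096, nb_txd = 4096;	/* example requests */

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Honor the descriptor limits advertised by the PMD. */
	nb_rxd = RTE_MIN(nb_rxd, dev_info.rx_desc_lim.nb_max);
	nb_rxd = RTE_MAX(nb_rxd, dev_info.rx_desc_lim.nb_min);
	nb_txd = RTE_MIN(nb_txd, dev_info.tx_desc_lim.nb_max);

	if (rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
				   rte_eth_dev_socket_id(port_id),
				   NULL, mb_pool) != 0)
		return -1;

	return rte_eth_tx_queue_setup(port_id, 0, nb_txd,
				      rte_eth_dev_socket_id(port_id),
				      NULL);
}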

Fixes: 540a211084a7 ("bnx2x: driver core")
Cc: stable@dpdk.org

Signed-off-by: Shahed Shaikh <shshaikh@marvell.com>
---
 drivers/net/bnx2x/bnx2x.h        | 10 ++++++++--
 drivers/net/bnx2x/bnx2x_ethdev.c |  5 +++++
 2 files changed, 13 insertions(+), 2 deletions(-)
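
Note for reviewers (not part of the commit message): assuming the 4 KB
BNX2X_PAGE_SIZE implied by the in-line comments (256 Tx BDs and 512 Rx
BDs per page), the new limits work out as:

  MAX_TX_AVAIL = USABLE_TX_BD_PER_PAGE * NUM_TX_PAGES - 2 = 255 * 16 - 2 = 4078
  MAX_RX_AVAIL = USABLE_RX_BD_PER_PAGE * MAX_RX_PAGES - 2 = 510 * 8  - 2 = 4078

Both values fit in the uint16_t nb_max field of struct rte_eth_desc_lim.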

diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index ef1688ff3..e4b4ecf1e 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -155,13 +155,14 @@ struct bnx2x_device_type {
  * Transmit Buffer Descriptor (tx_bd) definitions*
  */
 /* NUM_TX_PAGES must be a power of 2. */
+#define NUM_TX_PAGES		 16
 #define TOTAL_TX_BD_PER_PAGE     (BNX2X_PAGE_SIZE / sizeof(union eth_tx_bd_types)) /*  256 */
 #define USABLE_TX_BD_PER_PAGE    (TOTAL_TX_BD_PER_PAGE - 1)                      /*  255 */
 
 #define TOTAL_TX_BD(q)           (TOTAL_TX_BD_PER_PAGE * q->nb_tx_pages)         /*  512 */
 #define USABLE_TX_BD(q)          (USABLE_TX_BD_PER_PAGE * q->nb_tx_pages)        /*  510 */
 #define MAX_TX_BD(q)             (TOTAL_TX_BD(q) - 1)                            /*  511 */
-
+#define MAX_TX_AVAIL		 (USABLE_TX_BD_PER_PAGE * NUM_TX_PAGES - 2)
 #define NEXT_TX_BD(x)                                                   \
 	((((x) & USABLE_TX_BD_PER_PAGE) ==                              \
 	  (USABLE_TX_BD_PER_PAGE - 1)) ? (x) + 2 : (x) + 1)
@@ -182,13 +183,14 @@ struct bnx2x_device_type {
 /*
  * Receive Buffer Descriptor (rx_bd) definitions*
  */
-//#define NUM_RX_PAGES            1
+#define MAX_RX_PAGES            8
 #define TOTAL_RX_BD_PER_PAGE    (BNX2X_PAGE_SIZE / sizeof(struct eth_rx_bd))      /*  512 */
 #define USABLE_RX_BD_PER_PAGE   (TOTAL_RX_BD_PER_PAGE - 2)                      /*  510 */
 #define RX_BD_PER_PAGE_MASK     (TOTAL_RX_BD_PER_PAGE - 1)                      /*  511 */
 #define TOTAL_RX_BD(q)          (TOTAL_RX_BD_PER_PAGE * q->nb_rx_pages)         /*  512 */
 #define USABLE_RX_BD(q)         (USABLE_RX_BD_PER_PAGE * q->nb_rx_pages)        /*  510 */
 #define MAX_RX_BD(q)            (TOTAL_RX_BD(q) - 1)                            /*  511 */
+#define MAX_RX_AVAIL		(USABLE_RX_BD_PER_PAGE * MAX_RX_PAGES - 2)
 #define RX_BD_NEXT_PAGE_DESC_CNT 2
 
 #define NEXT_RX_BD(x)                                                   \
@@ -244,6 +246,10 @@ struct bnx2x_device_type {
 #define MIN_RX_AVAIL(sc)				\
 	((sc)->dropless_fc ? BD_TH_HI(sc) + 128 : 128)
 
+#define MIN_RX_SIZE_NONTPA_HW	ETH_MIN_RX_CQES_WITHOUT_TPA
+#define MIN_RX_SIZE_NONTPA	(RTE_MAX((uint32_t)MIN_RX_SIZE_NONTPA_HW,\
+					(uint32_t)MIN_RX_AVAIL(sc)))
+
 /*
  * dropless fc calculations for RCQs
  * Number of RCQs should be as number of buffers in BRB:
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index df5634fda..5be487765 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -480,6 +480,7 @@ static void
 bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
 	struct bnx2x_softc *sc = dev->data->dev_private;
+
 	dev_info->max_rx_queues  = sc->max_rx_queues;
 	dev_info->max_tx_queues  = sc->max_tx_queues;
 	dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
@@ -487,6 +488,10 @@ bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mac_addrs  = BNX2X_MAX_MAC_ADDRS;
 	dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
 	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+	dev_info->rx_desc_lim.nb_max = MAX_RX_AVAIL;
+	dev_info->rx_desc_lim.nb_min = MIN_RX_SIZE_NONTPA;
+	dev_info->tx_desc_lim.nb_max = MAX_TX_AVAIL;
 }
 
 static int
-- 
2.12.3



Thread overview: 7+ messages
2019-06-04 18:53 [dpdk-dev] [PATCH 1/5] net/bnx2x: fix packet drop Shahed Shaikh
2019-06-04 18:53 ` [dpdk-dev] [PATCH 2/5] net/bnx2x: fix interrupt flood Shahed Shaikh
2019-06-04 18:53 ` [dpdk-dev] [PATCH 3/5] net/bnx2x: fix memory leak Shahed Shaikh
2019-06-04 18:53 ` [dpdk-dev] [PATCH 4/5] net/bnx2x: fix link inconsistent state Shahed Shaikh
2019-06-04 18:53 ` Shahed Shaikh [this message]
2019-06-05  5:17 ` [dpdk-dev] [PATCH 1/5] net/bnx2x: fix packet drop Rasesh Mody
2019-06-11 11:12   ` Ferruh Yigit
