From: <skori@marvell.com>
To: Nithin Dabilpuram <ndabilpuram@marvell.com>,
Kiran Kumar K <kirankumark@marvell.com>,
Sunil Kumar Kori <skori@marvell.com>,
Satha Rao <skoteshwar@marvell.com>
Cc: <dev@dpdk.org>
Subject: [PATCH v2 3/3] net/cnxk: support congestion management ops
Date: Thu, 29 Sep 2022 15:24:54 +0530 [thread overview]
Message-ID: <20220929095455.2173071-3-skori@marvell.com> (raw)
In-Reply-To: <20220929095455.2173071-1-skori@marvell.com>
From: Sunil Kumar Kori <skori@marvell.com>
Support congestion management.
Depends-on: patch-24902 ("ethdev: support congestion management")
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
v1..v2:
- Rebase on top of the dpdk-next-net-mrvl/for-next-net
- Aligned with congestion management v3 spec
doc/guides/nics/features/cnxk.ini | 1 +
doc/guides/rel_notes/release_22_11.rst | 4 +
drivers/net/cnxk/cnxk_ethdev.c | 4 +
drivers/net/cnxk/cnxk_ethdev.h | 12 +++
drivers/net/cnxk/cnxk_ethdev_cman.c | 140 +++++++++++++++++++++++++
drivers/net/cnxk/meson.build | 1 +
6 files changed, 162 insertions(+)
create mode 100644 drivers/net/cnxk/cnxk_ethdev_cman.c
diff --git a/doc/guides/nics/features/cnxk.ini b/doc/guides/nics/features/cnxk.ini
index 1876fe86c7..bbb90e9527 100644
--- a/doc/guides/nics/features/cnxk.ini
+++ b/doc/guides/nics/features/cnxk.ini
@@ -41,6 +41,7 @@ Rx descriptor status = Y
Tx descriptor status = Y
Basic stats = Y
Stats per queue = Y
+Congestion management = Y
Extended stats = Y
FW version = Y
Module EEPROM dump = Y
diff --git a/doc/guides/rel_notes/release_22_11.rst b/doc/guides/rel_notes/release_22_11.rst
index 2e076ba2ad..18206587d8 100644
--- a/doc/guides/rel_notes/release_22_11.rst
+++ b/doc/guides/rel_notes/release_22_11.rst
@@ -59,6 +59,10 @@ New Features
* Added support to set device link down/up.
+* **Added Random Early Discard (RED) based congestion management for CN9K & CN10K.**
+
+ * Added support to set/get congestion management configuration.
+ * Added support to get congestion management capabilities.
Removed Items
-------------
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 48170147a4..2d46938d68 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1678,6 +1678,10 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
.tm_ops_get = cnxk_nix_tm_ops_get,
.mtr_ops_get = cnxk_nix_mtr_ops_get,
.eth_dev_priv_dump = cnxk_nix_eth_dev_priv_dump,
+ .cman_info_get = cnxk_nix_cman_info_get,
+ .cman_config_init = cnxk_nix_cman_config_init,
+ .cman_config_set = cnxk_nix_cman_config_set,
+ .cman_config_get = cnxk_nix_cman_config_get,
};
static int
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index c09e9bff8e..fc72ae917c 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -417,6 +417,9 @@ struct cnxk_eth_dev {
struct cnxk_mtr_policy mtr_policy;
struct cnxk_mtr mtr;
+ /* Congestion Management */
+ struct rte_eth_cman_config cman_cfg;
+
/* Rx burst for cleanup(Only Primary) */
eth_rx_burst_t rx_pkt_burst_no_offload;
@@ -649,6 +652,15 @@ cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
int cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bufs,
bool destroy);
+/* Congestion Management */
+int cnxk_nix_cman_info_get(struct rte_eth_dev *dev, struct rte_eth_cman_info *info);
+
+int cnxk_nix_cman_config_init(struct rte_eth_dev *dev, struct rte_eth_cman_config *config);
+
+int cnxk_nix_cman_config_set(struct rte_eth_dev *dev, const struct rte_eth_cman_config *config);
+
+int cnxk_nix_cman_config_get(struct rte_eth_dev *dev, struct rte_eth_cman_config *config);
+
/* Other private functions */
int nix_recalc_mtu(struct rte_eth_dev *eth_dev);
int nix_mtr_validate(struct rte_eth_dev *dev, uint32_t id);
diff --git a/drivers/net/cnxk/cnxk_ethdev_cman.c b/drivers/net/cnxk/cnxk_ethdev_cman.c
new file mode 100644
index 0000000000..d5e647c64d
--- /dev/null
+++ b/drivers/net/cnxk/cnxk_ethdev_cman.c
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2022 Marvell International Ltd.
+ */
+
+#include "cnxk_ethdev.h"
+
+#define CNXK_NIX_CMAN_RED_MIN_THRESH 75
+#define CNXK_NIX_CMAN_RED_MAX_THRESH 95
+
+int
+cnxk_nix_cman_info_get(struct rte_eth_dev *dev, struct rte_eth_cman_info *info)
+{
+ RTE_SET_USED(dev);
+
+ info->modes_supported = RTE_CMAN_RED;
+ info->objs_supported = RTE_ETH_CMAN_OBJ_RX_QUEUE | RTE_ETH_CMAN_OBJ_RX_QUEUE_MEMPOOL;
+
+ return 0;
+}
+
+int
+cnxk_nix_cman_config_init(struct rte_eth_dev *dev, struct rte_eth_cman_config *config)
+{
+ RTE_SET_USED(dev);
+
+ memset(config, 0, sizeof(struct rte_eth_cman_config));
+
+ config->obj = RTE_ETH_CMAN_OBJ_RX_QUEUE;
+ config->mode = RTE_CMAN_RED;
+ config->mode_param.red.min_th = CNXK_NIX_CMAN_RED_MIN_THRESH;
+ config->mode_param.red.max_th = CNXK_NIX_CMAN_RED_MAX_THRESH;
+ return 0;
+}
+
+static int
+nix_cman_config_validate(struct rte_eth_dev *eth_dev, const struct rte_eth_cman_config *config)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct rte_eth_cman_info info;
+
+ memset(&info, 0, sizeof(struct rte_eth_cman_info));
+ cnxk_nix_cman_info_get(eth_dev, &info);
+
+ if (!(config->obj & info.objs_supported)) {
+ plt_err("Invalid object");
+ return -EINVAL;
+ }
+
+ if (!(config->mode & info.modes_supported)) {
+ plt_err("Invalid mode");
+ return -EINVAL;
+ }
+
+ if (config->obj_param.rx_queue >= dev->nb_rxq) {
+ plt_err("Invalid queue ID. Queue = %u", config->obj_param.rx_queue);
+ return -EINVAL;
+ }
+
+ if (config->mode_param.red.min_th > CNXK_NIX_CMAN_RED_MAX_THRESH) {
+ plt_err("Invalid RED minimum threshold. min_th = %u",
+ config->mode_param.red.min_th);
+ return -EINVAL;
+ }
+
+ if (config->mode_param.red.max_th > CNXK_NIX_CMAN_RED_MAX_THRESH) {
+ plt_err("Invalid RED maximum threshold. max_th = %u",
+ config->mode_param.red.max_th);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int
+cnxk_nix_cman_config_set(struct rte_eth_dev *eth_dev, const struct rte_eth_cman_config *config)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
+ uint8_t drop, pass, shift;
+ uint8_t min_th, max_th;
+ struct roc_nix_cq *cq;
+ struct roc_nix_rq *rq;
+ bool is_mempool;
+ uint64_t buf_cnt;
+ int rc;
+
+ rc = nix_cman_config_validate(eth_dev, config);
+ if (rc)
+ return rc;
+
+ cq = &dev->cqs[config->obj_param.rx_queue];
+ rq = &dev->rqs[config->obj_param.rx_queue];
+ is_mempool = config->obj & RTE_ETH_CMAN_OBJ_RX_QUEUE_MEMPOOL ? true : false;
+ min_th = config->mode_param.red.min_th;
+ max_th = config->mode_param.red.max_th;
+
+ if (is_mempool) {
+ buf_cnt = roc_npa_aura_op_limit_get(rq->aura_handle);
+ shift = plt_log2_u32(buf_cnt);
+ shift = shift < 8 ? 0 : shift - 8;
+ pass = (buf_cnt >> shift) - ((buf_cnt * min_th / 100) >> shift);
+ drop = (buf_cnt >> shift) - ((buf_cnt * max_th / 100) >> shift);
+ rq->red_pass = pass;
+ rq->red_drop = drop;
+
+ if (rq->spb_ena) {
+ buf_cnt = roc_npa_aura_op_limit_get(rq->spb_aura_handle);
+ shift = plt_log2_u32(buf_cnt);
+ shift = shift < 8 ? 0 : shift - 8;
+ pass = (buf_cnt >> shift) - ((buf_cnt * min_th / 100) >> shift);
+ drop = (buf_cnt >> shift) - ((buf_cnt * max_th / 100) >> shift);
+ rq->spb_red_pass = pass;
+ rq->spb_red_drop = drop;
+ }
+ } else {
+ shift = plt_log2_u32(cq->nb_desc);
+ shift = shift < 8 ? 0 : shift - 8;
+ pass = 256 - ((cq->nb_desc * min_th / 100) >> shift);
+ drop = 256 - ((cq->nb_desc * max_th / 100) >> shift);
+
+ rq->xqe_red_pass = pass;
+ rq->xqe_red_drop = drop;
+ }
+
+ rc = roc_nix_rq_cman_config(nix, rq);
+ if (rc)
+ return rc;
+
+ memcpy(&dev->cman_cfg, config, sizeof(struct rte_eth_cman_config));
+ return 0;
+}
+
+int
+cnxk_nix_cman_config_get(struct rte_eth_dev *eth_dev, struct rte_eth_cman_config *config)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
+ memcpy(config, &dev->cman_cfg, sizeof(struct rte_eth_cman_config));
+ return 0;
+}
diff --git a/drivers/net/cnxk/meson.build b/drivers/net/cnxk/meson.build
index f347e98fce..9253e8d0ab 100644
--- a/drivers/net/cnxk/meson.build
+++ b/drivers/net/cnxk/meson.build
@@ -10,6 +10,7 @@ endif
sources = files(
'cnxk_ethdev.c',
+ 'cnxk_ethdev_cman.c',
'cnxk_ethdev_devargs.c',
'cnxk_ethdev_mtr.c',
'cnxk_ethdev_ops.c',
--
2.25.1
next prev parent reply other threads:[~2022-09-29 9:55 UTC|newest]
Thread overview: 16+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-09-19 12:41 [PATCH 1/3] app/testpmd: support congestion management CLIs skori
2022-09-19 12:41 ` [PATCH 2/3] common/cnxk: add congestion management ROC APIs skori
2022-09-19 12:41 ` [PATCH 3/3] net/cnxk: support congestion management ops skori
2022-09-29 9:54 ` [PATCH v2 1/3] app/testpmd: support congestion management CLIs skori
2022-09-29 9:54 ` [PATCH v2 2/3] common/cnxk: add congestion management ROC APIs skori
2022-10-11 6:27 ` Sunil Kumar Kori
2022-09-29 9:54 ` skori [this message]
2022-10-11 6:29 ` [PATCH v2 3/3] net/cnxk: support congestion management ops Sunil Kumar Kori
2022-10-12 6:43 ` Jerin Jacob
2022-10-12 9:01 ` [PATCH v2 1/3] app/testpmd: support congestion management CLIs Sunil Kumar Kori
2022-11-06 10:08 ` Andrew Rybchenko
2022-11-07 7:12 ` Singh, Aman Deep
2022-11-29 7:54 ` [EXT] " Sunil Kumar Kori
2022-11-29 8:04 ` [PATCH v3 1/1] " skori
2022-12-02 7:56 ` [PATCH v4 " skori
2022-12-07 14:19 ` Ferruh Yigit
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220929095455.2173071-3-skori@marvell.com \
--to=skori@marvell.com \
--cc=dev@dpdk.org \
--cc=kirankumark@marvell.com \
--cc=ndabilpuram@marvell.com \
--cc=skoteshwar@marvell.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).