From: Sunil Kumar Kori <skori@marvell.com>
To: Nithin Dabilpuram <ndabilpuram@marvell.com>,
Kiran Kumar K <kirankumark@marvell.com>,
Sunil Kumar Kori <skori@marvell.com>,
Satha Rao <skoteshwar@marvell.com>
Cc: <dev@dpdk.org>
Subject: [PATCH v11 2/2] net/cnxk: support priority flow control
Date: Tue, 22 Feb 2022 16:07:50 +0530
Message-ID: <20220222103750.866907-2-skori@marvell.com>
In-Reply-To: <20220222103750.866907-1-skori@marvell.com>
From: Sunil Kumar Kori <skori@marvell.com>
Adds support for priority flow control for CNXK platforms.
Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
---
v1..v2:
- fix application restart issue.
v2..v3:
- fix pause quanta configuration for cn10k.
- fix review comments.
v3..v4:
- fix PFC configuration with other types of TM tree,
  i.e. default, user and rate limit tree.
v4..v5:
- rebase on top of tree.
v5..v6:
- fix review comments.
v6..v7:
- use correct FC mode flags.
v7..v8:
- rebase on top of 22.03-rc1.
v8..v9:
- update documentation and release notes.
v9..v10:
- fix build error on RHEL.
v10..v11:
- rebase to dpdk-next-net-mrvl branch.
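
For context, a minimal sketch of how an application could exercise this
feature through the generic ethdev PFC queue API (assuming the
rte_eth_dev_priority_flow_ctrl_queue_* wrappers from the 22.03 ethdev
series; the port id, queue ids, TC number and pause time below are purely
illustrative):

  #include <string.h>
  #include <rte_ethdev.h>

  struct rte_eth_pfc_queue_info pfc_info;
  struct rte_eth_pfc_queue_conf pfc_conf;
  int rc;

  /* Query the number of traffic classes and supported modes first. */
  memset(&pfc_info, 0, sizeof(pfc_info));
  rc = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info);
  if (rc != 0 || pfc_info.tc_max == 0)
          return rc;

  memset(&pfc_conf, 0, sizeof(pfc_conf));
  pfc_conf.mode = RTE_ETH_FC_FULL;
  /* Tx pause: emit PFC frames for TC 0 when Rx queue 0 congests. */
  pfc_conf.tx_pause.tc = 0;
  pfc_conf.tx_pause.rx_qid = 0;
  pfc_conf.tx_pause.pause_time = 0x100;
  /* Rx pause: back-pressure Tx queue 0 on received PFC frames for TC 0. */
  pfc_conf.rx_pause.tc = 0;
  pfc_conf.rx_pause.tx_qid = 0;
  rc = rte_eth_dev_priority_flow_ctrl_queue_configure(port_id, &pfc_conf);

Note that the direction mapping mirrors the driver code below: tx_pause
describes the Rx queue whose congestion triggers transmitted PFC frames,
while rx_pause names the Tx queue throttled by received PFC frames.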
 doc/guides/nics/cnxk.rst               |   1 +
 doc/guides/rel_notes/release_22_03.rst |   4 +
 drivers/net/cnxk/cnxk_ethdev.c         |  32 +++++
 drivers/net/cnxk/cnxk_ethdev.h         |  20 +++
 drivers/net/cnxk/cnxk_ethdev_ops.c     | 188 +++++++++++++++++++++++--
 5 files changed, 236 insertions(+), 9 deletions(-)
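
A note on the new per-TC state: dev->pfc_tc_sq_map[] records which SQ each
traffic class is bound to, with 0xFFFF meaning unmapped, so a TC can
back-pressure at most one SQ at a time. The guard in
nix_priority_flow_ctrl_configure() reduces to roughly this (illustrative
sketch, not the literal driver code):

  /* One SQ per TC; 0xFFFF marks an unused slot. */
  if (tc_sq_map[tc] != 0xFFFF && tc_sq_map[tc] != sq_qid)
          return -ENOTSUP; /* TC already bound to a different SQ */
  tc_sq_map[tc] = sq_qid;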
diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
index 27a94204cb..c9467f5d2a 100644
--- a/doc/guides/nics/cnxk.rst
+++ b/doc/guides/nics/cnxk.rst
@@ -36,6 +36,7 @@ Features of the CNXK Ethdev PMD are:
- Support Rx interrupt
- Inline IPsec processing support
- Ingress meter support
+- Queue based priority flow control support
Prerequisites
-------------
diff --git a/doc/guides/rel_notes/release_22_03.rst b/doc/guides/rel_notes/release_22_03.rst
index 41923f50e6..112dde0e79 100644
--- a/doc/guides/rel_notes/release_22_03.rst
+++ b/doc/guides/rel_notes/release_22_03.rst
@@ -130,6 +130,10 @@ New Features
* Added LED OEM support.
+* **Updated Marvell cnxk ethdev PMD.**
+
+ * Added queue based priority flow control support for CN9K & CN10K.
+
* **Added an API for private user data in asymmetric crypto session.**
An API was added to get/set an asymmetric crypto session's user data.
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 3468aab329..0558bc3eed 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1251,6 +1251,8 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
goto cq_fini;
}
+ /* Initialize TC to SQ mapping as invalid */
+ memset(dev->pfc_tc_sq_map, 0xFF, sizeof(dev->pfc_tc_sq_map));
/*
* Restore queue config when reconfigure followed by
* reconfigure and no queue configure invoked from application case.
@@ -1547,6 +1549,10 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
.tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
.flow_ctrl_get = cnxk_nix_flow_ctrl_get,
.flow_ctrl_set = cnxk_nix_flow_ctrl_set,
+ .priority_flow_ctrl_queue_config =
+ cnxk_nix_priority_flow_ctrl_queue_config,
+ .priority_flow_ctrl_queue_info_get =
+ cnxk_nix_priority_flow_ctrl_queue_info_get,
.dev_set_link_up = cnxk_nix_set_link_up,
.dev_set_link_down = cnxk_nix_set_link_down,
.get_module_info = cnxk_nix_get_module_info,
@@ -1726,7 +1732,9 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+ struct rte_eth_pfc_queue_conf pfc_conf;
struct roc_nix *nix = &dev->nix;
+ struct rte_eth_fc_conf fc_conf;
int rc, i;
/* Disable switch hdr pkind */
@@ -1744,6 +1752,30 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
roc_nix_npc_rx_ena_dis(nix, false);
+ /* Restore 802.3 Flow control configuration */
+ memset(&pfc_conf, 0, sizeof(struct rte_eth_pfc_queue_conf));
+ memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
+ fc_conf.mode = RTE_ETH_FC_NONE;
+ rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
+ pfc_conf.mode = RTE_ETH_FC_NONE;
+ for (i = 0; i < CNXK_NIX_PFC_CHAN_COUNT; i++) {
+ if (dev->pfc_tc_sq_map[i] != 0xFFFF) {
+ pfc_conf.rx_pause.tx_qid = dev->pfc_tc_sq_map[i];
+ pfc_conf.rx_pause.tc = i;
+ pfc_conf.tx_pause.rx_qid = i;
+ pfc_conf.tx_pause.tc = i;
+ rc = cnxk_nix_priority_flow_ctrl_queue_config(eth_dev,
+ &pfc_conf);
+ if (rc)
+ plt_err("Failed to reset PFC. error code(%d)",
+ rc);
+ }
+ }
+
+ fc_conf.mode = RTE_ETH_FC_FULL;
+ rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
+
/* Disable and free rte_meter entries */
nix_meter_fini(dev);
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index ad568c9fcd..d71e7465e1 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -137,12 +137,24 @@
/* SPI will be in 20 bits of tag */
#define CNXK_ETHDEV_SPI_TAG_MASK 0xFFFFFUL
+#define CNXK_NIX_PFC_CHAN_COUNT 16
+
struct cnxk_fc_cfg {
enum rte_eth_fc_mode mode;
uint8_t rx_pause;
uint8_t tx_pause;
};
+struct cnxk_pfc_cfg {
+ struct cnxk_fc_cfg fc_cfg;
+ uint16_t class_en;
+ uint16_t pause_time;
+ uint8_t rx_tc;
+ uint8_t rx_qid;
+ uint8_t tx_tc;
+ uint8_t tx_qid;
+};
+
struct cnxk_eth_qconf {
union {
struct rte_eth_txconf tx;
@@ -372,6 +384,8 @@ struct cnxk_eth_dev {
struct cnxk_eth_qconf *rx_qconf;
/* Flow control configuration */
+ uint16_t pfc_tc_sq_map[CNXK_NIX_PFC_CHAN_COUNT];
+ struct cnxk_pfc_cfg pfc_cfg;
struct cnxk_fc_cfg fc_cfg;
/* PTP Counters */
@@ -473,6 +487,10 @@ int cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
struct rte_eth_fc_conf *fc_conf);
int cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
struct rte_eth_fc_conf *fc_conf);
+int cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
+ struct rte_eth_pfc_queue_conf *pfc_conf);
+int cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_pfc_queue_info *pfc_info);
int cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev);
int cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev);
int cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
@@ -617,6 +635,8 @@ int nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id,
uint32_t *prev_id, uint32_t *next_id,
struct cnxk_mtr_policy_node *policy,
int *tree_level);
+int nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+ struct cnxk_pfc_cfg *conf);
/* Inlines */
static __rte_always_inline uint64_t
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 1ae90092d6..b0a16f3c56 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -230,6 +230,8 @@ nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
cq = &dev->cqs[qid];
fc_cfg.type = ROC_NIX_FC_CQ_CFG;
fc_cfg.cq_cfg.enable = enable;
+ /* Map all CQs to last channel */
+ fc_cfg.cq_cfg.tc = roc_nix_chan_count_get(nix) - 1;
fc_cfg.cq_cfg.rq = qid;
fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
@@ -248,6 +250,8 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_data *data = eth_dev->data;
struct cnxk_fc_cfg *fc = &dev->fc_cfg;
struct roc_nix *nix = &dev->nix;
+ struct cnxk_eth_rxq_sp *rxq;
+ struct cnxk_eth_txq_sp *txq;
uint8_t rx_pause, tx_pause;
int rc, i;
@@ -282,7 +286,12 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
}
for (i = 0; i < data->nb_rx_queues; i++) {
- rc = nix_fc_cq_config_set(dev, i, tx_pause);
+ struct roc_nix_fc_cfg fc_cfg;
+
+ memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+ rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[i]) -
+ 1;
+ rc = nix_fc_cq_config_set(dev, rxq->qid, !!tx_pause);
if (rc)
return rc;
}
@@ -290,14 +299,19 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
/* Check if RX pause frame is enabled or not */
if (fc->rx_pause ^ rx_pause) {
- struct roc_nix_fc_cfg fc_cfg;
-
- memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
- fc_cfg.type = ROC_NIX_FC_TM_CFG;
- fc_cfg.tm_cfg.enable = !!rx_pause;
- rc = roc_nix_fc_config_set(nix, &fc_cfg);
- if (rc)
- return rc;
+ for (i = 0; i < data->nb_tx_queues; i++) {
+ struct roc_nix_fc_cfg fc_cfg;
+
+ memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+ txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[i]) -
+ 1;
+ fc_cfg.type = ROC_NIX_FC_TM_CFG;
+ fc_cfg.tm_cfg.sq = txq->qid;
+ fc_cfg.tm_cfg.enable = !!rx_pause;
+ rc = roc_nix_fc_config_set(nix, &fc_cfg);
+ if (rc)
+ return rc;
+ }
}
rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
@@ -311,6 +325,42 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
return rc;
}
+int
+cnxk_nix_priority_flow_ctrl_queue_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_pfc_queue_info *pfc_info)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
+ pfc_info->tc_max = roc_nix_chan_count_get(&dev->nix);
+ pfc_info->mode_capa = RTE_ETH_FC_FULL;
+ return 0;
+}
+
+int
+cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
+ struct rte_eth_pfc_queue_conf *pfc_conf)
+{
+ struct cnxk_pfc_cfg conf;
+ int rc;
+
+ memset(&conf, 0, sizeof(struct cnxk_pfc_cfg));
+
+ conf.fc_cfg.mode = pfc_conf->mode;
+
+ conf.pause_time = pfc_conf->tx_pause.pause_time;
+ conf.rx_tc = pfc_conf->tx_pause.tc;
+ conf.rx_qid = pfc_conf->tx_pause.rx_qid;
+
+ conf.tx_tc = pfc_conf->rx_pause.tc;
+ conf.tx_qid = pfc_conf->rx_pause.tx_qid;
+
+ rc = nix_priority_flow_ctrl_configure(eth_dev, &conf);
+ if (rc)
+ return rc;
+
+ return rc;
+}
+
int
cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
const struct rte_flow_ops **ops)
@@ -972,3 +1022,123 @@ cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
return 0;
}
+
+int
+nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
+ struct cnxk_pfc_cfg *conf)
+{
+ enum roc_nix_fc_mode mode_map[] = {ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
+ ROC_NIX_FC_TX, ROC_NIX_FC_FULL};
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
+ struct roc_nix *nix = &dev->nix;
+ struct roc_nix_pfc_cfg pfc_cfg;
+ struct roc_nix_fc_cfg fc_cfg;
+ struct cnxk_eth_rxq_sp *rxq;
+ struct cnxk_eth_txq_sp *txq;
+ uint8_t rx_pause, tx_pause;
+ enum rte_eth_fc_mode mode;
+ struct roc_nix_cq *cq;
+ struct roc_nix_sq *sq;
+ int rc;
+
+ if (roc_nix_is_vf_or_sdp(nix)) {
+ plt_err("Prio flow ctrl config is not allowed on VF and SDP");
+ return -ENOTSUP;
+ }
+
+ if (roc_model_is_cn96_ax() && data->dev_started) {
+ /* On Ax, CQ should be in disabled state
+ * while setting flow control configuration.
+ */
+ plt_info("Stop the port=%d for setting flow control",
+ data->port_id);
+ return 0;
+ }
+
+ if (dev->pfc_tc_sq_map[conf->tx_tc] != 0xFFFF &&
+ dev->pfc_tc_sq_map[conf->tx_tc] != conf->tx_qid) {
+ plt_err("Same TC can not be configured on multiple SQs");
+ return -ENOTSUP;
+ }
+
+ mode = conf->fc_cfg.mode;
+ rx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_RX_PAUSE);
+ tx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_TX_PAUSE);
+
+ /* Configure CQs */
+ memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+ rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[conf->rx_qid]) - 1;
+ cq = &dev->cqs[rxq->qid];
+ fc_cfg.type = ROC_NIX_FC_CQ_CFG;
+ fc_cfg.cq_cfg.tc = conf->rx_tc;
+ fc_cfg.cq_cfg.enable = !!tx_pause;
+ fc_cfg.cq_cfg.rq = cq->qid;
+ fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
+ rc = roc_nix_fc_config_set(nix, &fc_cfg);
+ if (rc)
+ goto exit;
+
+ /* Check if RX pause frame is enabled or not */
+ if (pfc->fc_cfg.rx_pause ^ rx_pause) {
+ if (conf->tx_qid >= eth_dev->data->nb_tx_queues)
+ goto exit;
+
+ if ((roc_nix_tm_tree_type_get(nix) == ROC_NIX_TM_DEFAULT) &&
+ eth_dev->data->nb_tx_queues > 1) {
+ /*
+ * Disabled xmit will be enabled when
+ * new topology is available.
+ */
+ rc = roc_nix_tm_hierarchy_disable(nix);
+ if (rc)
+ goto exit;
+
+ rc = roc_nix_tm_pfc_prepare_tree(nix);
+ if (rc)
+ goto exit;
+
+ rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_PFC,
+ true);
+ if (rc)
+ goto exit;
+ }
+ }
+
+ txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[conf->tx_qid]) - 1;
+ sq = &dev->sqs[txq->qid];
+ memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+ fc_cfg.type = ROC_NIX_FC_TM_CFG;
+ fc_cfg.tm_cfg.sq = sq->qid;
+ fc_cfg.tm_cfg.tc = conf->tx_tc;
+ fc_cfg.tm_cfg.enable = !!rx_pause;
+ rc = roc_nix_fc_config_set(nix, &fc_cfg);
+ if (rc)
+ return rc;
+
+ dev->pfc_tc_sq_map[conf->tx_tc] = sq->qid;
+
+ /* Configure MAC block */
+ if (tx_pause)
+ pfc->class_en |= BIT(conf->rx_tc);
+ else
+ pfc->class_en &= ~BIT(conf->rx_tc);
+
+ if (pfc->class_en)
+ mode = RTE_ETH_FC_FULL;
+
+ memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
+ pfc_cfg.mode = mode_map[mode];
+ pfc_cfg.tc = pfc->class_en;
+ rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
+ if (rc)
+ return rc;
+
+ pfc->fc_cfg.rx_pause = rx_pause;
+ pfc->fc_cfg.tx_pause = tx_pause;
+ pfc->fc_cfg.mode = mode;
+
+exit:
+ return rc;
+}
--
2.25.1