From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: Nithin Dabilpuram <ndabilpuram@marvell.com>,
Kiran Kumar K <kirankumark@marvell.com>,
Sunil Kumar Kori <skori@marvell.com>,
Satha Rao <skoteshwar@marvell.com>
Cc: <jerinj@marvell.com>, <dev@dpdk.org>
Subject: [PATCH 08/15] net/cnxk: check flow control config per queue on dev start
Date: Fri, 3 Mar 2023 13:40:06 +0530
Message-ID: <20230303081013.589868-8-ndabilpuram@marvell.com>
In-Reply-To: <20230303081013.589868-1-ndabilpuram@marvell.com>
Check and enable/disable flow control config per queue on
device start to handle cases like SSO enablement, TM changes, etc.
Modify flow control config get to report the status per RQ/SQ.
Also disallow changes to the flow control config while the device
is in started state.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
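Note: below is a minimal sketch (not part of this patch) of how an
application is expected to drive link flow control with this change in
place. The helper name configure_link_fc() is illustrative only; the
calls are the standard ethdev rte_eth_dev_flow_ctrl_set()/get() API, and
the -EBUSY/-ENOTSUP returns reflect the checks added in the diff below.

#include <string.h>
#include <rte_ethdev.h>

static int
configure_link_fc(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int rc;

	memset(&fc_conf, 0, sizeof(fc_conf));
	/* Only the mode field is honoured by the cnxk driver. */
	fc_conf.mode = RTE_ETH_FC_FULL;

	/* Configure while the port is stopped: with this patch the driver
	 * returns -EBUSY if the port is already started and -ENOTSUP if
	 * PFC is currently enabled.
	 */
	rc = rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
	if (rc)
		return rc;

	rc = rte_eth_dev_start(port_id);
	if (rc)
		return rc;

	/* get() now reports status derived from per-RQ/SQ state: if any
	 * RQ/SQ has flow control disabled, that direction is reported
	 * as disabled.
	 */
	return rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
}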
drivers/net/cnxk/cnxk_ethdev.c | 9 +-
drivers/net/cnxk/cnxk_ethdev_ops.c | 198 ++++++++++++++++-------------
2 files changed, 113 insertions(+), 94 deletions(-)
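Also for reference, a standalone illustration (again not part of the
patch) of how the reworked cnxk_nix_flow_ctrl_get() collapses the
per-direction pause state into the reported rte_eth_fc_mode; the
[rx_pause][tx_pause] indexing mirrors the mode_map table added in the
diff below:

#include <stdio.h>
#include <stdbool.h>

static const char *const mode_map[2][2] = {
	[0][0] = "RTE_ETH_FC_NONE",
	[0][1] = "RTE_ETH_FC_TX_PAUSE",
	[1][0] = "RTE_ETH_FC_RX_PAUSE",
	[1][1] = "RTE_ETH_FC_FULL",
};

int main(void)
{
	/* MAC-level mode says both directions are enabled... */
	bool rx_pause = true, tx_pause = true;

	/* ...but one SQ has its traffic class marked invalid, so Rx pause
	 * is effectively off for that queue and the reported mode degrades.
	 */
	bool some_sq_fc_disabled = true;

	if (some_sq_fc_disabled)
		rx_pause = false;

	printf("reported mode: %s\n", mode_map[rx_pause][tx_pause]);
	return 0;
}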
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index e99335b117..d8ccd307a8 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -363,7 +363,7 @@ nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
struct cnxk_fc_cfg *fc = &dev->fc_cfg;
int rc;
- if (roc_nix_is_vf_or_sdp(&dev->nix))
+ if (roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix))
return 0;
/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
@@ -388,7 +388,11 @@ nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
struct cnxk_fc_cfg *fc = &dev->fc_cfg;
struct rte_eth_fc_conf fc_cfg = {0};
- if (roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix))
+ if (roc_nix_is_sdp(&dev->nix))
+ return 0;
+
+ /* Don't do anything if PFC is enabled */
+ if (dev->pfc_cfg.rx_pause_en || dev->pfc_cfg.tx_pause_en)
return 0;
fc_cfg.mode = fc->mode;
@@ -481,7 +485,6 @@ cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
sq->qid = qid;
sq->nb_desc = nb_desc;
sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);
- sq->tc = ROC_NIX_PFC_CLASS_INVALID;
if (nix->tx_compl_ena) {
sq->cqid = sq->qid + dev->nb_rxq;
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index a6ab493626..5df7927d7b 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -205,12 +205,15 @@ cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
struct rte_eth_fc_conf *fc_conf)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
- enum rte_eth_fc_mode mode_map[] = {
- RTE_ETH_FC_NONE, RTE_ETH_FC_RX_PAUSE,
- RTE_ETH_FC_TX_PAUSE, RTE_ETH_FC_FULL
- };
+ enum rte_eth_fc_mode mode_map[2][2] = {
+ [0][0] = RTE_ETH_FC_NONE,
+ [0][1] = RTE_ETH_FC_TX_PAUSE,
+ [1][0] = RTE_ETH_FC_RX_PAUSE,
+ [1][1] = RTE_ETH_FC_FULL,
+ };
struct roc_nix *nix = &dev->nix;
- int mode;
+ uint8_t rx_pause, tx_pause;
+ int mode, i;
if (roc_nix_is_sdp(nix))
return 0;
@@ -219,32 +222,25 @@ cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
if (mode < 0)
return mode;
+ rx_pause = (mode == ROC_NIX_FC_FULL) || (mode == ROC_NIX_FC_RX);
+ tx_pause = (mode == ROC_NIX_FC_FULL) || (mode == ROC_NIX_FC_TX);
+
+ /* Report flow control as disabled even if one RQ/SQ has it disabled */
+ for (i = 0; i < dev->nb_rxq; i++) {
+ if (dev->rqs[i].tc == ROC_NIX_PFC_CLASS_INVALID)
+ tx_pause = 0;
+ }
+
+ for (i = 0; i < dev->nb_txq; i++) {
+ if (dev->sqs[i].tc == ROC_NIX_PFC_CLASS_INVALID)
+ rx_pause = 0;
+ }
+
memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf));
- fc_conf->mode = mode_map[mode];
+ fc_conf->mode = mode_map[rx_pause][tx_pause];
return 0;
}
-static int
-nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
-{
- struct roc_nix *nix = &dev->nix;
- struct roc_nix_fc_cfg fc_cfg;
- struct roc_nix_cq *cq;
- struct roc_nix_rq *rq;
-
- memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
- rq = &dev->rqs[qid];
- cq = &dev->cqs[qid];
- fc_cfg.type = ROC_NIX_FC_RQ_CFG;
- fc_cfg.rq_cfg.enable = enable;
- fc_cfg.rq_cfg.tc = 0;
- fc_cfg.rq_cfg.rq = qid;
- fc_cfg.rq_cfg.pool = rq->aura_handle;
- fc_cfg.rq_cfg.cq_drop = cq->drop_thresh;
-
- return roc_nix_fc_config_set(nix, &fc_cfg);
-}
-
int
cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
struct rte_eth_fc_conf *fc_conf)
@@ -260,68 +256,90 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
struct cnxk_eth_rxq_sp *rxq;
struct cnxk_eth_txq_sp *txq;
uint8_t rx_pause, tx_pause;
+ struct roc_nix_sq *sq;
+ struct roc_nix_cq *cq;
+ struct roc_nix_rq *rq;
+ uint8_t tc;
int rc, i;
if (roc_nix_is_sdp(nix))
return 0;
+ if (dev->pfc_cfg.rx_pause_en || dev->pfc_cfg.tx_pause_en) {
+ plt_err("Disable PFC before configuring Flow Control");
+ return -ENOTSUP;
+ }
+
if (fc_conf->high_water || fc_conf->low_water || fc_conf->pause_time ||
fc_conf->mac_ctrl_frame_fwd || fc_conf->autoneg) {
plt_info("Only MODE configuration is supported");
return -EINVAL;
}
-
- rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
- (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
- tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) ||
- (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
-
- if (fc_conf->mode == fc->mode) {
- fc->rx_pause = rx_pause;
- fc->tx_pause = tx_pause;
- return 0;
+ /* Disallow flow control changes when device is in started state */
+ if (data->dev_started) {
+ plt_info("Stop the port=%d for setting flow control", data->port_id);
+ return -EBUSY;
}
+ rx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) || (fc_conf->mode == RTE_ETH_FC_RX_PAUSE);
+ tx_pause = (fc_conf->mode == RTE_ETH_FC_FULL) || (fc_conf->mode == RTE_ETH_FC_TX_PAUSE);
+
/* Check if TX pause frame is already enabled or not */
- if (fc->tx_pause ^ tx_pause) {
- if (roc_model_is_cn96_ax() && data->dev_started) {
- /* On Ax, CQ should be in disabled state
- * while setting flow control configuration.
- */
- plt_info("Stop the port=%d for setting flow control",
- data->port_id);
- return 0;
- }
+ tc = tx_pause ? 0 : ROC_NIX_PFC_CLASS_INVALID;
+ for (i = 0; i < data->nb_rx_queues; i++) {
+ struct roc_nix_fc_cfg fc_cfg;
- for (i = 0; i < data->nb_rx_queues; i++) {
- struct roc_nix_fc_cfg fc_cfg;
+ /* Skip if RQ does not exist */
+ if (!data->rx_queues[i])
+ continue;
- memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
- rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[i]) -
- 1;
- rxq->tx_pause = !!tx_pause;
- rc = nix_fc_cq_config_set(dev, rxq->qid, !!tx_pause);
- if (rc)
- return rc;
- }
+ rxq = cnxk_eth_rxq_to_sp(data->rx_queues[i]);
+ rq = &dev->rqs[rxq->qid];
+ cq = &dev->cqs[rxq->qid];
+
+ /* Skip if RQ is in expected state */
+ if (fc->tx_pause == tx_pause && rq->tc == tc)
+ continue;
+
+ memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+ fc_cfg.type = ROC_NIX_FC_RQ_CFG;
+ fc_cfg.rq_cfg.enable = !!tx_pause;
+ fc_cfg.rq_cfg.tc = 0;
+ fc_cfg.rq_cfg.rq = rq->qid;
+ fc_cfg.rq_cfg.pool = rq->aura_handle;
+ fc_cfg.rq_cfg.cq_drop = cq->drop_thresh;
+
+ rc = roc_nix_fc_config_set(nix, &fc_cfg);
+ if (rc)
+ return rc;
+ rxq->tx_pause = !!tx_pause;
}
/* Check if RX pause frame is enabled or not */
- if (fc->rx_pause ^ rx_pause) {
- for (i = 0; i < data->nb_tx_queues; i++) {
- struct roc_nix_fc_cfg fc_cfg;
-
- memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
- txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[i]) -
- 1;
- fc_cfg.type = ROC_NIX_FC_TM_CFG;
- fc_cfg.tm_cfg.sq = txq->qid;
- fc_cfg.tm_cfg.enable = !!rx_pause;
- rc = roc_nix_fc_config_set(nix, &fc_cfg);
- if (rc)
- return rc;
- }
+ tc = rx_pause ? 0 : ROC_NIX_PFC_CLASS_INVALID;
+ for (i = 0; i < data->nb_tx_queues; i++) {
+ struct roc_nix_fc_cfg fc_cfg;
+
+ /* Skip if SQ does not exist */
+ if (!data->tx_queues[i])
+ continue;
+
+ txq = cnxk_eth_txq_to_sp(data->tx_queues[i]);
+ sq = &dev->sqs[txq->qid];
+
+ /* Skip if SQ is in expected state */
+ if (fc->rx_pause == rx_pause && sq->tc == tc)
+ continue;
+
+ memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
+ fc_cfg.type = ROC_NIX_FC_TM_CFG;
+ fc_cfg.tm_cfg.sq = txq->qid;
+ fc_cfg.tm_cfg.tc = 0;
+ fc_cfg.tm_cfg.enable = !!rx_pause;
+ rc = roc_nix_fc_config_set(nix, &fc_cfg);
+ if (rc && rc != EEXIST)
+ return rc;
}
rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
@@ -350,6 +368,7 @@ cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
struct rte_eth_pfc_queue_conf *pfc_conf)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
struct roc_nix *nix = &dev->nix;
enum rte_eth_fc_mode mode;
uint8_t en, tc;
@@ -366,6 +385,12 @@ cnxk_nix_priority_flow_ctrl_queue_config(struct rte_eth_dev *eth_dev,
return -ENOTSUP;
}
+ /* Disallow flow control changes when device is in started state */
+ if (data->dev_started) {
+ plt_info("Stop the port=%d for setting PFC", data->port_id);
+ return -EBUSY;
+ }
+
mode = pfc_conf->mode;
/* Perform Tx pause configuration on RQ */
@@ -1094,7 +1119,7 @@ nix_priority_flow_ctrl_rq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
enum roc_nix_fc_mode mode;
struct roc_nix_rq *rq;
struct roc_nix_cq *cq;
- int rc;
+ int rc, i;
if (roc_model_is_cn96_ax() && data->dev_started) {
/* On Ax, CQ should be in disabled state
@@ -1127,15 +1152,13 @@ nix_priority_flow_ctrl_rq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
if (rc)
return rc;
- if (rxq->tx_pause != tx_pause) {
- if (tx_pause)
- pfc->tx_pause_en++;
- else
- pfc->tx_pause_en--;
- }
-
rxq->tx_pause = !!tx_pause;
rxq->tc = tc;
+ /* Recheck number of RQ's that have PFC enabled */
+ pfc->tx_pause_en = 0;
+ for (i = 0; i < dev->nb_rxq; i++)
+ if (dev->rqs[i].tc != ROC_NIX_PFC_CLASS_INVALID)
+ pfc->tx_pause_en++;
/* Skip if PFC already enabled in mac */
if (pfc->tx_pause_en > 1)
@@ -1168,7 +1191,7 @@ nix_priority_flow_ctrl_sq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
struct cnxk_eth_txq_sp *txq;
enum roc_nix_fc_mode mode;
struct roc_nix_sq *sq;
- int rc;
+ int rc, i;
if (data->tx_queues == NULL)
return -EINVAL;
@@ -1212,18 +1235,11 @@ nix_priority_flow_ctrl_sq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
if (rc)
return rc;
- /* Maintaining a count for SQs which are configured for PFC. This is
- * required to handle disabling of a particular SQ without affecting
- * PFC on other SQs.
- */
- if (!fc_cfg.tm_cfg.enable && sq->tc != ROC_NIX_PFC_CLASS_INVALID) {
- sq->tc = ROC_NIX_PFC_CLASS_INVALID;
- pfc->rx_pause_en--;
- } else if (fc_cfg.tm_cfg.enable &&
- sq->tc == ROC_NIX_PFC_CLASS_INVALID) {
- sq->tc = tc;
- pfc->rx_pause_en++;
- }
+ /* Recheck number of SQ's that have PFC enabled */
+ pfc->rx_pause_en = 0;
+ for (i = 0; i < dev->nb_txq; i++)
+ if (dev->sqs[i].tc != ROC_NIX_PFC_CLASS_INVALID)
+ pfc->rx_pause_en++;
if (pfc->rx_pause_en > 1)
goto exit;
--
2.25.1