From: Sunil Kumar Kori <skori@marvell.com>
To: Sunil Kumar Kori <skori@marvell.com>,
Nithin Kumar Dabilpuram <ndabilpuram@marvell.com>,
Kiran Kumar Kokkilagadda <kirankumark@marvell.com>,
Satha Koteswara Rao Kottidi <skoteshwar@marvell.com>
Cc: "dev@dpdk.org" <dev@dpdk.org>
Subject: RE: [PATCH v1 2/2] net/cnxk: support priority flow control
Date: Sun, 9 Jan 2022 11:18:40 +0000 [thread overview]
Message-ID: <CO6PR18MB3860B5EB4D1469A84459DE5BB44F9@CO6PR18MB3860.namprd18.prod.outlook.com> (raw)
In-Reply-To: <20220109111130.751933-2-skori@marvell.com>
This patch series depends on http://patches.dpdk.org/project/dpdk/patch/20220109105851.734687-1-skori@marvell.com/.
Regards
Sunil Kumar Kori
>-----Original Message-----
>From: skori@marvell.com <skori@marvell.com>
>Sent: Sunday, January 9, 2022 4:42 PM
>To: Nithin Kumar Dabilpuram <ndabilpuram@marvell.com>; Kiran Kumar
>Kokkilagadda <kirankumark@marvell.com>; Sunil Kumar Kori
><skori@marvell.com>; Satha Koteswara Rao Kottidi
><skoteshwar@marvell.com>
>Cc: dev@dpdk.org
>Subject: [PATCH v1 2/2] net/cnxk: support priority flow control
>
>From: Sunil Kumar Kori <skori@marvell.com>
>
>Patch implements priority flow control support for CNXK platforms.
>
>Signed-off-by: Sunil Kumar Kori <skori@marvell.com>
>---
> drivers/net/cnxk/cnxk_ethdev.c | 19 ++++
> drivers/net/cnxk/cnxk_ethdev.h | 16 +++
> drivers/net/cnxk/cnxk_ethdev_ops.c | 177 +++++++++++++++++++++++++++--
> 3 files changed, 203 insertions(+), 9 deletions(-)
>
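For reviewers, a minimal usage sketch of the new per-queue PFC op from the
application side. It assumes the companion ethdev wrapper
rte_eth_dev_priority_flow_ctrl_queue_set() and the rte_eth_pfc_queue_conf
layout proposed in the dependent series linked above; the wrapper name, the
helper, and the values are illustrative assumptions, not part of this patch.

#include <string.h>
#include <rte_ethdev.h>

/* Hypothetical helper: enable PFC on traffic class 3, pairing Rx queue 0
 * (whose congestion triggers transmitted pause frames) with Tx queue 0
 * (which is throttled when pause frames for TC 3 are received).
 */
static int
app_enable_pfc(uint16_t port_id)
{
	struct rte_eth_pfc_queue_conf pfc_conf;

	memset(&pfc_conf, 0, sizeof(pfc_conf));
	pfc_conf.mode = RTE_ETH_FC_FULL;
	/* Pause frames this port transmits when Rx queue 0 backs up */
	pfc_conf.tx_pause.tc = 3;
	pfc_conf.tx_pause.rx_qid = 0;
	pfc_conf.tx_pause.pause_time = 0x100;
	/* Pause frames this port honors by pausing Tx queue 0 */
	pfc_conf.rx_pause.tc = 3;
	pfc_conf.rx_pause.tx_qid = 0;

	return rte_eth_dev_priority_flow_ctrl_queue_set(port_id, &pfc_conf);
}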
>diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
>index 74f625553d..382d88bbf3 100644
>--- a/drivers/net/cnxk/cnxk_ethdev.c
>+++ b/drivers/net/cnxk/cnxk_ethdev.c
>@@ -1260,6 +1260,8 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev)
> goto cq_fini;
> }
>
>+ /* Initialize TC to SQ mapping as invalid */
>+ memset(dev->pfc_tc_sq_map, 0xFF, sizeof(dev->pfc_tc_sq_map));
> /*
> * Restore queue config when reconfigure followed by
> * reconfigure and no queue configure invoked from application case.
>@@ -1548,6 +1550,7 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
> .tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
> .flow_ctrl_get = cnxk_nix_flow_ctrl_get,
> .flow_ctrl_set = cnxk_nix_flow_ctrl_set,
>+ .priority_flow_ctrl_queue_set = cnxk_nix_priority_flow_ctrl_queue_set,
> .dev_set_link_up = cnxk_nix_set_link_up,
> .dev_set_link_down = cnxk_nix_set_link_down,
> .get_module_info = cnxk_nix_get_module_info,
>@@ -1721,6 +1724,8 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
> {
> struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
> const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
>+ struct rte_eth_pfc_queue_conf pfc_conf = {0};
>+ struct rte_eth_fc_conf fc_conf = {0};
> struct roc_nix *nix = &dev->nix;
> int rc, i;
>
>@@ -1736,6 +1741,20 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
>
> roc_nix_npc_rx_ena_dis(nix, false);
>
>+ /* Restore 802.3 Flow control configuration */
>+ fc_conf.mode = RTE_ETH_FC_NONE;
>+ rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
>+
>+ pfc_conf.mode = RTE_ETH_FC_NONE;
>+ pfc_conf.rx_pause.tc = roc_nix_chan_count_get(nix) - 1;
>+ pfc_conf.tx_pause.tc = roc_nix_chan_count_get(nix) - 1;
>+ rc = cnxk_nix_priority_flow_ctrl_queue_set(eth_dev, &pfc_conf);
>+ if (rc)
>+ plt_err("Failed to reset PFC. error code(%d)", rc);
>+
>+ fc_conf.mode = RTE_ETH_FC_FULL;
>+ rc = cnxk_nix_flow_ctrl_set(eth_dev, &fc_conf);
>+
> /* Disable and free rte_meter entries */
> nix_meter_fini(dev);
>
>diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
>index 5bfda3d815..28fb19307a 100644
>--- a/drivers/net/cnxk/cnxk_ethdev.h
>+++ b/drivers/net/cnxk/cnxk_ethdev.h
>@@ -143,6 +143,16 @@ struct cnxk_fc_cfg {
> uint8_t tx_pause;
> };
>
>+struct cnxk_pfc_cfg {
>+ struct cnxk_fc_cfg fc_cfg;
>+ uint16_t class_en;
>+ uint16_t pause_time;
>+ uint8_t rx_tc;
>+ uint8_t rx_qid;
>+ uint8_t tx_tc;
>+ uint8_t tx_qid;
>+};
>+
> struct cnxk_eth_qconf {
> union {
> struct rte_eth_txconf tx;
>@@ -366,6 +376,8 @@ struct cnxk_eth_dev {
> struct cnxk_eth_qconf *rx_qconf;
>
> /* Flow control configuration */
>+ uint16_t pfc_tc_sq_map[16];
>+ struct cnxk_pfc_cfg pfc_cfg;
> struct cnxk_fc_cfg fc_cfg;
>
> /* PTP Counters */
>@@ -467,6 +479,8 @@ int cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
> struct rte_eth_fc_conf *fc_conf);
> int cnxk_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
> struct rte_eth_fc_conf *fc_conf);
>+int cnxk_nix_priority_flow_ctrl_queue_set(struct rte_eth_dev *eth_dev,
>+ struct rte_eth_pfc_queue_conf *pfc_conf);
> int cnxk_nix_set_link_up(struct rte_eth_dev *eth_dev);
> int cnxk_nix_set_link_down(struct rte_eth_dev *eth_dev);
> int cnxk_nix_get_module_info(struct rte_eth_dev *eth_dev,
>@@ -606,6 +620,8 @@ int nix_mtr_color_action_validate(struct rte_eth_dev *eth_dev, uint32_t id,
> uint32_t *prev_id, uint32_t *next_id,
> struct cnxk_mtr_policy_node *policy,
> int *tree_level);
>+int nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
>+ struct cnxk_pfc_cfg *conf);
>
> /* Inlines */
> static __rte_always_inline uint64_t
>diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
>index ce5f1f7240..27fa2da36d 100644
>--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
>+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
>@@ -69,6 +69,8 @@ cnxk_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
> devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
> RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
> devinfo->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
>+
>+ devinfo->pfc_queue_tc_max = roc_nix_chan_count_get(&dev->nix);
> return 0;
> }
>
>@@ -230,6 +232,8 @@ nix_fc_cq_config_set(struct cnxk_eth_dev *dev, uint16_t qid, bool enable)
> cq = &dev->cqs[qid];
> fc_cfg.type = ROC_NIX_FC_CQ_CFG;
> fc_cfg.cq_cfg.enable = enable;
>+ /* Map all CQs to last channel */
>+ fc_cfg.cq_cfg.tc = roc_nix_chan_count_get(nix) - 1;
> fc_cfg.cq_cfg.rq = qid;
> fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
>
>@@ -248,6 +252,8 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
> struct rte_eth_dev_data *data = eth_dev->data;
> struct cnxk_fc_cfg *fc = &dev->fc_cfg;
> struct roc_nix *nix = &dev->nix;
>+ struct cnxk_eth_rxq_sp *rxq;
>+ struct cnxk_eth_txq_sp *txq;
> uint8_t rx_pause, tx_pause;
> int rc, i;
>
>@@ -282,7 +288,12 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
> }
>
> for (i = 0; i < data->nb_rx_queues; i++) {
>- rc = nix_fc_cq_config_set(dev, i, tx_pause);
>+ struct roc_nix_fc_cfg fc_cfg;
>+
>+ memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
>+ rxq = ((struct cnxk_eth_rxq_sp *)
>+ data->rx_queues[i]) - 1;
>+ rc = nix_fc_cq_config_set(dev, rxq->qid, !!tx_pause);
> if (rc)
> return rc;
> }
>@@ -290,14 +301,19 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
>
> /* Check if RX pause frame is enabled or not */
> if (fc->rx_pause ^ rx_pause) {
>- struct roc_nix_fc_cfg fc_cfg;
>-
>- memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
>- fc_cfg.type = ROC_NIX_FC_TM_CFG;
>- fc_cfg.tm_cfg.enable = !!rx_pause;
>- rc = roc_nix_fc_config_set(nix, &fc_cfg);
>- if (rc)
>- return rc;
>+ for (i = 0; i < data->nb_tx_queues; i++) {
>+ struct roc_nix_fc_cfg fc_cfg;
>+
>+ memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
>+ txq = ((struct cnxk_eth_txq_sp *)
>+ data->tx_queues[i]) - 1;
>+ fc_cfg.type = ROC_NIX_FC_TM_CFG;
>+ fc_cfg.tm_cfg.sq = txq->qid;
>+ fc_cfg.tm_cfg.enable = !!rx_pause;
>+ rc = roc_nix_fc_config_set(nix, &fc_cfg);
>+ if (rc)
>+ return rc;
>+ }
> }
>
> rc = roc_nix_fc_mode_set(nix, mode_map[fc_conf->mode]);
>@@ -311,6 +327,29 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
> return rc;
> }
>
>+int
>+cnxk_nix_priority_flow_ctrl_queue_set(struct rte_eth_dev *eth_dev,
>+ struct rte_eth_pfc_queue_conf *pfc_conf)
>+{
>+ struct cnxk_pfc_cfg conf = {0};
>+ int rc;
>+
>+ conf.fc_cfg.mode = pfc_conf->mode;
>+
>+ conf.pause_time = pfc_conf->tx_pause.pause_time;
>+ conf.rx_tc = pfc_conf->tx_pause.tc;
>+ conf.rx_qid = pfc_conf->tx_pause.rx_qid;
>+
>+ conf.tx_tc = pfc_conf->rx_pause.tc;
>+ conf.tx_qid = pfc_conf->rx_pause.tx_qid;
>+
>+ rc = nix_priority_flow_ctrl_configure(eth_dev, &conf);
>+
>+ return rc;
>+}
>+
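A note on the field mapping in cnxk_nix_priority_flow_ctrl_queue_set() above,
since the direction swap is easy to misread: the application's tx_pause
parameters land in the driver's rx_tc/rx_qid because pause frames this port
transmits are triggered by local Rx (CQ) congestion, while the rx_pause
parameters land in tx_tc/tx_qid because received pause frames throttle a
local SQ. A sketch with hypothetical values, assuming the
rte_eth_pfc_queue_conf definition from the dependent ethdev series:

/* Hypothetical input and the resulting cnxk_pfc_cfg fields. */
struct rte_eth_pfc_queue_conf pc = {
	.mode = RTE_ETH_FC_FULL,
	.tx_pause = { .tc = 3, .rx_qid = 0, .pause_time = 0x100 },
	.rx_pause = { .tc = 3, .tx_qid = 0 },
};
/* After the mapping above:
 *   conf.rx_tc = 3, conf.rx_qid = 0   (from pc.tx_pause, Rx/CQ side)
 *   conf.tx_tc = 3, conf.tx_qid = 0   (from pc.rx_pause, Tx/SQ side)
 */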
> int
> cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
> const struct rte_flow_ops **ops)
>@@ -911,3 +950,123 @@ cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
>
> return 0;
> }
>+
>+int
>+nix_priority_flow_ctrl_configure(struct rte_eth_dev *eth_dev,
>+ struct cnxk_pfc_cfg *conf)
>+{
>+ enum roc_nix_fc_mode mode_map[] = {ROC_NIX_FC_NONE, ROC_NIX_FC_RX,
>+ ROC_NIX_FC_TX, ROC_NIX_FC_FULL};
>+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
>+ struct rte_eth_dev_data *data = eth_dev->data;
>+ struct cnxk_pfc_cfg *pfc = &dev->pfc_cfg;
>+ struct roc_nix *nix = &dev->nix;
>+ struct roc_nix_pfc_cfg pfc_cfg;
>+ struct roc_nix_fc_cfg fc_cfg;
>+ struct cnxk_eth_rxq_sp *rxq;
>+ struct cnxk_eth_txq_sp *txq;
>+ uint8_t rx_pause, tx_pause;
>+ enum rte_eth_fc_mode mode;
>+ struct roc_nix_cq *cq;
>+ struct roc_nix_sq *sq;
>+ int rc;
>+
>+ if (roc_nix_is_vf_or_sdp(nix)) {
>+ plt_err("Prio flow ctrl config is not allowed on VF and SDP");
>+ return -ENOTSUP;
>+ }
>+
>+ if (roc_model_is_cn96_ax() && data->dev_started) {
>+ /* On Ax, CQ should be in disabled state
>+ * while setting flow control configuration.
>+ */
>+ plt_info("Stop the port=%d for setting flow control",
>+ data->port_id);
>+ return 0;
>+ }
>+
>+ if (dev->pfc_tc_sq_map[conf->tx_tc] != 0xFFFF &&
>+ dev->pfc_tc_sq_map[conf->tx_tc] != conf->tx_qid) {
>+ plt_err("Same TC can not be configured on multiple SQs");
>+ return -ENOTSUP;
>+ }
>+
>+ mode = conf->fc_cfg.mode;
>+ rx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_RX_PAUSE);
>+ tx_pause = (mode == RTE_ETH_FC_FULL) || (mode == RTE_ETH_FC_TX_PAUSE);
>+
>+ /* Configure CQs */
>+ memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
>+ rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[conf->rx_qid]) - 1;
>+ cq = &dev->cqs[rxq->qid];
>+ fc_cfg.type = ROC_NIX_FC_CQ_CFG;
>+ fc_cfg.cq_cfg.tc = conf->rx_tc;
>+ fc_cfg.cq_cfg.enable = !!tx_pause;
>+ fc_cfg.cq_cfg.rq = cq->qid;
>+ fc_cfg.cq_cfg.cq_drop = cq->drop_thresh;
>+ rc = roc_nix_fc_config_set(nix, &fc_cfg);
>+ if (rc)
>+ goto exit;
>+
>+ /* Check if RX pause frame is enabled or not */
>+ if (pfc->fc_cfg.rx_pause ^ rx_pause) {
>+ if (conf->tx_qid >= eth_dev->data->nb_tx_queues)
>+ goto exit;
>+
>+ if ((roc_nix_tm_tree_type_get(nix) != ROC_NIX_TM_PFC) &&
>+ eth_dev->data->nb_tx_queues > 1) {
>+ /*
>+ * Disabled xmit will be enabled when
>+ * new topology is available.
>+ */
>+ rc = roc_nix_tm_hierarchy_disable(nix);
>+ if (rc)
>+ goto exit;
>+
>+ rc = roc_nix_tm_prepare_pfc_tree(nix);
>+ if (rc)
>+ goto exit;
>+
>+ rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_PFC, true);
>+ if (rc)
>+ goto exit;
>+ }
>+ }
>+
>+ txq = ((struct cnxk_eth_txq_sp *)data->tx_queues[conf->tx_qid]) - 1;
>+ sq = &dev->sqs[txq->qid];
>+ memset(&fc_cfg, 0, sizeof(struct roc_nix_fc_cfg));
>+ fc_cfg.type = ROC_NIX_FC_TM_CFG;
>+ fc_cfg.tm_cfg.sq = sq->qid;
>+ fc_cfg.tm_cfg.tc = conf->tx_tc;
>+ fc_cfg.tm_cfg.enable = !!rx_pause;
>+ rc = roc_nix_fc_config_set(nix, &fc_cfg);
>+ if (rc)
>+ return rc;
>+
>+ dev->pfc_tc_sq_map[conf->tx_tc] = sq->qid;
>+
>+ /* Configure MAC block */
>+ if (tx_pause)
>+ pfc->class_en |= BIT(conf->rx_tc);
>+ else
>+ pfc->class_en &= ~BIT(conf->rx_tc);
>+
>+ if (pfc->class_en)
>+ mode = RTE_ETH_FC_FULL;
>+
>+ memset(&pfc_cfg, 0, sizeof(struct roc_nix_pfc_cfg));
>+ pfc_cfg.mode = mode_map[mode];
>+ pfc_cfg.tc = conf->rx_tc;
>+ rc = roc_nix_pfc_mode_set(nix, &pfc_cfg);
>+ if (rc)
>+ return rc;
>+
>+ pfc->fc_cfg.rx_pause = rx_pause;
>+ pfc->fc_cfg.tx_pause = tx_pause;
>+ pfc->fc_cfg.mode = mode;
>+
>+exit:
>+ return rc;
>+}
>--
>2.25.1
Thread overview: 29+ messages
2022-01-09 11:11 [PATCH v1 1/2] common/cnxk: support priority flow ctrl config API skori
2022-01-09 11:11 ` [PATCH v1 2/2] net/cnxk: support priority flow control skori
2022-01-09 11:18 ` Sunil Kumar Kori [this message]
2022-01-11 8:18 ` [PATCH v2 1/2] common/cnxk: support priority flow ctrl config API skori
2022-01-11 8:18 ` [PATCH v2 2/2] net/cnxk: support priority flow control skori
2022-01-18 13:28 ` [PATCH v3 1/2] common/cnxk: support priority flow ctrl config API skori
2022-01-18 13:28 ` [PATCH v3 2/2] net/cnxk: support priority flow control skori
2022-01-20 16:59 ` [PATCH v4 1/2] common/cnxk: support priority flow ctrl config API skori
2022-01-20 16:59 ` [PATCH v4 2/2] net/cnxk: support priority flow control skori
2022-01-25 10:02 ` [PATCH v4 1/2] common/cnxk: support priority flow ctrl config API Ray Kinsella
2022-01-25 10:19 ` [EXT] " Sunil Kumar Kori
2022-01-25 11:23 ` [PATCH v5 " skori
2022-01-25 11:23 ` [PATCH v5 2/2] net/cnxk: support priority flow control skori
2022-01-28 13:28 ` [PATCH v6 1/2] common/cnxk: support priority flow ctrl config API skori
2022-01-28 13:29 ` [PATCH v6 2/2] net/cnxk: support priority flow control skori
2022-02-07 17:21 ` [PATCH v7 1/2] common/cnxk: support priority flow ctrl config API skori
2022-02-07 17:21 ` [PATCH v7 2/2] net/cnxk: support priority flow control skori
2022-02-14 9:02 ` [PATCH v8 1/2] common/cnxk: support priority flow ctrl config API skori
2022-02-14 9:02 ` [PATCH v8 2/2] net/cnxk: support priority flow control skori
2022-02-14 10:10 ` [PATCH v9 1/2] common/cnxk: support priority flow ctrl config API skori
2022-02-14 10:10 ` [PATCH v9 2/2] net/cnxk: support priority flow control skori
2022-02-18 6:11 ` Jerin Jacob
2022-02-22 8:06 ` [EXT] " Sunil Kumar Kori
2022-02-22 8:58 ` [PATCH v10 1/2] common/cnxk: support priority flow ctrl config API skori
2022-02-22 8:58 ` [PATCH v10 2/2] net/cnxk: support priority flow control skori
2022-02-22 10:37 ` [PATCH v11 1/2] common/cnxk: support priority flow ctrl config API skori
2022-02-22 10:37 ` [PATCH v11 2/2] net/cnxk: support priority flow control skori
2022-02-23 11:36 ` [PATCH v11 1/2] common/cnxk: support priority flow ctrl config API Jerin Jacob
2022-02-14 10:06 ` [PATCH v8 " Ray Kinsella