DPDK patches and discussions
From: Mingjin Ye <mingjinx.ye@intel.com>
To: dev@dpdk.org
Cc: Mingjin Ye <mingjinx.ye@intel.com>, Qiming Yang <qiming.yang@intel.com>
Subject: [PATCH v2] net/ice: support FEC feature
Date: Tue,  2 Jul 2024 08:02:44 +0000	[thread overview]
Message-ID: <20240702080244.1190884-1-mingjinx.ye@intel.com> (raw)
In-Reply-To: <20240411094548.1622662-1-mingjinx.ye@intel.com>

This patch enables three Forward Error Correction (FEC) related ops
in the ice driver. As no speed information can be obtained from the
hardware, this patch only reports the FEC capability.
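
For illustration only, and separate from the patch itself: a minimal
sketch of how an application could exercise these ops through the
generic ethdev FEC API, assuming port_id refers to an ice port with
FEC-capable media (error handling trimmed):

#include <stdio.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_ethdev.h>

/* Minimal sketch: query and set FEC via the generic ethdev API. */
static void
fec_example(uint16_t port_id)
{
	struct rte_eth_fec_capa capa[8];
	uint32_t mode;
	int num;

	/* Per-speed FEC capabilities; a return value larger than
	 * RTE_DIM(capa) means the array is too small.
	 */
	num = rte_eth_fec_get_capability(port_id, capa, RTE_DIM(capa));
	if (num > 0 && num <= (int)RTE_DIM(capa))
		printf("port %u: %d FEC capability entries\n", port_id, num);

	/* Current FEC mode, as a bitmask of RTE_ETH_FEC_MODE_CAPA_MASK bits */
	if (rte_eth_fec_get(port_id, &mode) == 0)
		printf("port %u: FEC mode mask 0x%x\n", port_id, mode);

	/* Request RS FEC; the driver rejects unsupported combinations */
	if (rte_eth_fec_set(port_id, RTE_ETH_FEC_MODE_CAPA_MASK(RS)) < 0)
		printf("port %u: failed to set RS FEC\n", port_id);
}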

Signed-off-by: Qiming Yang <qiming.yang@intel.com>
Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
v2: fix some logic
---
 doc/guides/nics/features/ice.ini |   1 +
 doc/guides/nics/ice.rst          |   5 +
 drivers/net/ice/ice_ethdev.c     | 292 +++++++++++++++++++++++++++++++
 3 files changed, 298 insertions(+)

diff --git a/doc/guides/nics/features/ice.ini b/doc/guides/nics/features/ice.ini
index 62869ef0a0..9c8569740a 100644
--- a/doc/guides/nics/features/ice.ini
+++ b/doc/guides/nics/features/ice.ini
@@ -11,6 +11,7 @@ Speed capabilities   = Y
 Link speed configuration = Y
 Link status          = Y
 Link status event    = Y
+FEC                  = Y
 Rx interrupt         = Y
 Fast mbuf free       = P
 Queue start/stop     = Y
diff --git a/doc/guides/nics/ice.rst b/doc/guides/nics/ice.rst
index 3deeea9e6c..3d7e4ed7f1 100644
--- a/doc/guides/nics/ice.rst
+++ b/doc/guides/nics/ice.rst
@@ -323,6 +323,11 @@ The DCF PMD needs to advertise and acquire DCF capability which allows DCF to
 send AdminQ commands that it would like to execute over to the PF and receive
 responses for the same from PF.
 
+Forward Error Correction (FEC)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Supports get/set FEC mode and get FEC capability.
+
 Generic Flow Support
 ~~~~~~~~~~~~~~~~~~~~
 
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 194109b0f6..3caacfa48a 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -181,6 +181,10 @@ static int ice_timesync_read_time(struct rte_eth_dev *dev,
 static int ice_timesync_write_time(struct rte_eth_dev *dev,
 				   const struct timespec *timestamp);
 static int ice_timesync_disable(struct rte_eth_dev *dev);
+static int ice_fec_get_capability(struct rte_eth_dev *dev, struct rte_eth_fec_capa *speed_fec_capa,
+			   unsigned int num);
+static int ice_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa);
+static int ice_fec_set(struct rte_eth_dev *dev, uint32_t fec_capa);
 static const uint32_t *ice_buffer_split_supported_hdr_ptypes_get(struct rte_eth_dev *dev,
 						size_t *no_of_elements);
 
@@ -298,6 +302,9 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
 	.timesync_write_time          = ice_timesync_write_time,
 	.timesync_disable             = ice_timesync_disable,
 	.tm_ops_get                   = ice_tm_ops_get,
+	.fec_get_capability           = ice_fec_get_capability,
+	.fec_get                      = ice_fec_get,
+	.fec_set                      = ice_fec_set,
 	.buffer_split_supported_hdr_ptypes_get = ice_buffer_split_supported_hdr_ptypes_get,
 };
 
@@ -6677,6 +6684,291 @@ ice_buffer_split_supported_hdr_ptypes_get(struct rte_eth_dev *dev __rte_unused,
 	return ptypes;
 }
 
+static unsigned int
+ice_fec_get_capa_num(struct ice_aqc_get_phy_caps_data *pcaps,
+			   struct rte_eth_fec_capa *speed_fec_capa)
+{
+	unsigned int num = 0;
+	int auto_fec = (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC) ?
+		RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) : 0;
+	int link_nofec = (pcaps->link_fec_options & ICE_AQC_PHY_FEC_DIS) ?
+		RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) : 0;
+
+	if (pcaps->eee_cap & ICE_AQC_PHY_EEE_EN_100BASE_TX) {
+		if (speed_fec_capa) {
+			speed_fec_capa[num].speed = RTE_ETH_SPEED_NUM_100M;
+			speed_fec_capa[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
+		}
+		num++;
+	}
+
+	if (pcaps->eee_cap & (ICE_AQC_PHY_EEE_EN_1000BASE_T |
+		ICE_AQC_PHY_EEE_EN_1000BASE_KX)) {
+		if (speed_fec_capa) {
+			speed_fec_capa[num].speed = RTE_ETH_SPEED_NUM_1G;
+			speed_fec_capa[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
+		}
+		num++;
+	}
+
+	if (pcaps->eee_cap & (ICE_AQC_PHY_EEE_EN_10GBASE_T |
+		ICE_AQC_PHY_EEE_EN_10GBASE_KR)) {
+		if (speed_fec_capa) {
+			speed_fec_capa[num].speed = RTE_ETH_SPEED_NUM_10G;
+			speed_fec_capa[num].capa = auto_fec | link_nofec;
+
+			if (pcaps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN)
+				speed_fec_capa[num].capa |= RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
+		}
+		num++;
+	}
+
+	if (pcaps->eee_cap & ICE_AQC_PHY_EEE_EN_25GBASE_KR) {
+		if (speed_fec_capa) {
+			speed_fec_capa[num].speed = RTE_ETH_SPEED_NUM_25G;
+			speed_fec_capa[num].capa = auto_fec | link_nofec;
+
+			if (pcaps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN)
+				speed_fec_capa[num].capa |= RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
+
+			if (pcaps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
+				speed_fec_capa[num].capa |= RTE_ETH_FEC_MODE_CAPA_MASK(RS);
+		}
+		num++;
+	}
+
+	if (pcaps->eee_cap & ICE_AQC_PHY_EEE_EN_40GBASE_KR4) {
+		if (speed_fec_capa) {
+			speed_fec_capa[num].speed = RTE_ETH_SPEED_NUM_40G;
+			speed_fec_capa[num].capa = auto_fec | link_nofec;
+
+			if (pcaps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN)
+				speed_fec_capa[num].capa |= RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
+		}
+		num++;
+	}
+
+	if (pcaps->eee_cap & (ICE_AQC_PHY_EEE_EN_50GBASE_KR2 |
+		ICE_AQC_PHY_EEE_EN_50GBASE_KR_PAM4)) {
+		if (speed_fec_capa) {
+			speed_fec_capa[num].speed = RTE_ETH_SPEED_NUM_50G;
+			speed_fec_capa[num].capa = auto_fec | link_nofec;
+
+			if (pcaps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN)
+				speed_fec_capa[num].capa |= RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
+
+			if (pcaps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
+				speed_fec_capa[num].capa |= RTE_ETH_FEC_MODE_CAPA_MASK(RS);
+		}
+		num++;
+	}
+
+	if (pcaps->eee_cap & (ICE_AQC_PHY_EEE_EN_100GBASE_KR4 |
+		ICE_AQC_PHY_EEE_EN_100GBASE_KR2_PAM4)) {
+		if (speed_fec_capa) {
+			speed_fec_capa[num].speed = RTE_ETH_SPEED_NUM_100G;
+			speed_fec_capa[num].capa = auto_fec | link_nofec;
+
+			if (pcaps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN)
+				speed_fec_capa[num].capa |= RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
+
+			if (pcaps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
+				speed_fec_capa[num].capa |= RTE_ETH_FEC_MODE_CAPA_MASK(RS);
+		}
+		num++;
+	}
+
+	return num;
+}
+
+static int
+ice_fec_get_capability(struct rte_eth_dev *dev, struct rte_eth_fec_capa *speed_fec_capa,
+			   unsigned int num)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_aqc_get_phy_caps_data pcaps = {0};
+	unsigned int capa_num;
+	int ret;
+
+	ret = ice_aq_get_phy_caps(hw->port_info, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
+				  &pcaps, NULL);
+	if (ret != ICE_SUCCESS)
+		goto done;
+
+	/* First pass with a NULL array: only count the supported entries */
+	capa_num = ice_fec_get_capa_num(&pcaps, NULL);
+	if (!speed_fec_capa || num < capa_num) {
+		ret = capa_num;
+		goto done;
+	}
+
+	ret = ice_fec_get_capa_num(&pcaps, speed_fec_capa);
+
+done:
+	return ret;
+}
+
+static int
+ice_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
+{
+#define FEC_CAPA_NUM 10
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
+	bool link_up;
+	u32 temp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
+	struct ice_link_status link_status = {0};
+	struct ice_aqc_get_phy_caps_data pcaps = {0};
+	struct ice_port_info *pi = hw->port_info;
+	u8 fec_config;
+	int ret;
+
+	if (!pi)
+		return -ENOTSUP;
+
+	ret = ice_get_link_info_safe(pf, enable_lse, &link_status);
+	if (ret != ICE_SUCCESS) {
+		PMD_DRV_LOG(ERR, "Failed to get link information: %d",
+			ret);
+		goto done;
+	}
+
+	link_up = link_status.link_info & ICE_AQ_LINK_UP;
+
+	ret = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
+				  &pcaps, NULL);
+	if (ret != ICE_SUCCESS)
+		goto done;
+
+	/* Get current FEC mode from port info */
+	if (link_up) {
+		switch (link_status.fec_info) {
+		case ICE_AQ_LINK_25G_KR_FEC_EN:
+			temp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
+			break;
+		case ICE_AQ_LINK_25G_RS_528_FEC_EN:
+			/* fall-through */
+		case ICE_AQ_LINK_25G_RS_544_FEC_EN:
+			temp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
+			break;
+		default:
+			temp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
+			break;
+		}
+		goto done;
+	}
+
+	if (pcaps.caps & ICE_AQC_PHY_EN_AUTO_FEC) {
+		temp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
+		goto done;
+	}
+
+	fec_config = pcaps.link_fec_options & ICE_AQC_PHY_FEC_MASK;
+
+	if (fec_config & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
+				ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
+				ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
+				ICE_AQC_PHY_FEC_25G_KR_REQ))
+		temp_fec_capa |= RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
+
+	if (fec_config & (ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN |
+				ICE_AQC_PHY_FEC_25G_RS_528_REQ |
+				ICE_AQC_PHY_FEC_25G_RS_544_REQ))
+		temp_fec_capa |= RTE_ETH_FEC_MODE_CAPA_MASK(RS);
+
+done:
+	*fec_capa = temp_fec_capa;
+	return ret;
+}
+
+static int
+ice_fec_set(struct rte_eth_dev *dev, uint32_t fec_capa)
+{
+	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_port_info *pi = hw->port_info;
+	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
+	bool fec_auto = false, fec_kr = false, fec_rs = false;
+
+	if (!pi)
+		return -ENOTSUP;
+
+	if (fec_capa & ~(RTE_ETH_FEC_MODE_CAPA_MASK(AUTO) |
+		RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
+		RTE_ETH_FEC_MODE_CAPA_MASK(RS)))
+		return -EINVAL;
+	/* Copy the current user PHY configuration. The current user PHY
+	 * configuration is initialized during probe from PHY capabilities
+	 * software mode, and updated on set PHY configuration.
+	 */
+	memcpy(&cfg, &pi->phy.curr_user_phy_cfg, sizeof(cfg));
+
+	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(AUTO))
+		fec_auto = true;
+
+	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(BASER))
+		fec_kr = true;
+
+	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(RS))
+		fec_rs = true;
+
+	if (fec_auto) {
+		if (fec_kr || fec_rs) {
+			if (fec_rs) {
+				cfg.link_fec_opt = ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN |
+					ICE_AQC_PHY_FEC_25G_RS_528_REQ |
+					ICE_AQC_PHY_FEC_25G_RS_544_REQ;
+			}
+			if (fec_kr) {
+				cfg.link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
+					ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
+					ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
+					ICE_AQC_PHY_FEC_25G_KR_REQ;
+			}
+		} else {
+			cfg.link_fec_opt = ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN |
+				ICE_AQC_PHY_FEC_25G_RS_528_REQ |
+				ICE_AQC_PHY_FEC_25G_RS_544_REQ |
+				ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
+				ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
+				ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
+				ICE_AQC_PHY_FEC_25G_KR_REQ;
+		}
+	} else {
+		if (fec_kr ^ fec_rs) {
+			if (fec_rs) {
+				cfg.link_fec_opt = ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN |
+					ICE_AQC_PHY_FEC_25G_RS_528_REQ |
+					ICE_AQC_PHY_FEC_25G_RS_544_REQ;
+			} else {
+				cfg.link_fec_opt = ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
+					ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
+					ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
+					ICE_AQC_PHY_FEC_25G_KR_REQ;
+			}
+		} else {
+			return -EINVAL;
+		}
+	}
+
+	/* Preserve the FEC-disable bit if it was set in the current user config */
+	if (pi->phy.curr_user_phy_cfg.link_fec_opt &
+		~ICE_AQC_PHY_FEC_MASK)
+		cfg.link_fec_opt |= ICE_AQC_PHY_FEC_DIS;
+	else
+		cfg.link_fec_opt &= ICE_AQC_PHY_FEC_MASK;
+
+	/* Proceed only if requesting different FEC mode */
+	if (pi->phy.curr_user_phy_cfg.link_fec_opt == cfg.link_fec_opt)
+		return 0;
+
+	cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+
+	if (ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL))
+		return -EAGAIN;
+
+	return 0;
+}
+
 static int
 ice_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 	      struct rte_pci_device *pci_dev)
-- 
2.25.1
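
As a usage example for the documentation update above (assuming port 0
is bound to the ice PMD), the same ops can be exercised interactively
from testpmd:

	testpmd> show port 0 fec capabilities
	testpmd> show port 0 fec_mode
	testpmd> set port 0 fec_mode rs
	testpmd> set port 0 fec_mode auto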


Thread overview: 6+ messages
2024-04-11  9:45 [PATCH] " Mingjin Ye
2024-06-27 17:07 ` Medvedkin, Vladimir
2024-07-02  8:02 ` Mingjin Ye [this message]
2024-07-03 18:31   ` [PATCH v2] " Medvedkin, Vladimir
2024-07-04  6:50   ` [PATCH v3] " Mingjin Ye
2024-07-04 12:00     ` Medvedkin, Vladimir
