From: "Zhang, Qi Z" <qi.z.zhang@intel.com>
To: "Jiang, JunyuX" <junyux.jiang@intel.com>, "dev@dpdk.org" <dev@dpdk.org>
Cc: "Yang, Qiming" <qiming.yang@intel.com>, "Su, Simei" <simei.su@intel.com>
Subject: Re: [dpdk-dev] [PATCH v2] net/ice: support based RSS configure
Date: Mon, 22 Jun 2020 14:36:33 +0000 [thread overview]
Message-ID: <039ED4275CED7440929022BC67E7061154842F8A@SHSMSX103.ccr.corp.intel.com> (raw)
In-Reply-To: <20200622053312.46719-1-junyux.jiang@intel.com>
> -----Original Message-----
> From: Jiang, JunyuX <junyux.jiang@intel.com>
> Sent: Monday, June 22, 2020 1:33 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming <qiming.yang@intel.com>; Zhang, Qi Z
> <qi.z.zhang@intel.com>; Su, Simei <simei.su@intel.com>; Jiang, JunyuX
> <junyux.jiang@intel.com>
> Subject: [PATCH v2] net/ice: support based RSS configure
The title is misleading; how about "initialize and update RSS based on user request"?
>
> Enable/disable RSS for corresponding flow base on the user's requirement
Initialize and update the RSS configuration based on the user request (rte_eth_rss_conf) from dev_configure and the .rss_hash_update ops.
All previous default configuration has been removed.
>
> Signed-off-by: Junyu Jiang <junyux.jiang@intel.com>
>
> ---
> v1->v2:
> remove gtpu and pppoe/pppod configuration from rss init
> ---
> drivers/net/ice/ice_ethdev.c | 162 +++++++++++++++++++++--------------
> 1 file changed, 96 insertions(+), 66 deletions(-)
>
> diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c index
> 5a89a1955..cbe59a40e 100644
> --- a/drivers/net/ice/ice_ethdev.c
> +++ b/drivers/net/ice/ice_ethdev.c
> @@ -2441,6 +2441,87 @@ ice_dev_uninit(struct rte_eth_dev *dev)
> return 0;
> }
>
> +static void
> +ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf) {
> + struct ice_hw *hw = ICE_PF_TO_HW(pf);
> + struct ice_vsi *vsi = pf->main_vsi;
> + int ret;
> +
> + /**
> + * configure RSS for IPv4 with input set IPv4 src/dst
> + * configure RSS for IPv6 with input set IPv6 src/dst
The comment looks redundant; please merge it into one line:
/* Configure RSS for IP with src/dst address as input set */
> + */
> + if (rss_hf & ETH_RSS_IP) {
This is not correct; it is possible the user only wants IPv4 but not IPv6.
For ice, I think we can do it like below:
if (rss_hf & ETH_RSS_IPV4) {
	ice_add_rss_cfg(... ICE_FLOW_HASH_IPV4 ...)
}
if (rss_hf & ETH_RSS_IPV6) {
	ice_add_rss_cfg(... ICE_FLOW_HASH_IPV6 ...)
}
We can just ignore ETH_RSS_FRAG_IPV4 and ETH_RSS_NONFRAG_IPV4_OTHER, and the same for UDP/TCP and SCTP.
> + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
> + ICE_FLOW_SEG_HDR_IPV4, 0);
You need ICE_FLOW_SEG_HDR_IPV_OTHER, see patch
http://patchwork.dpdk.org/patch/71584/
> + if (ret)
> + PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d",
> + __func__, ret);
> +
> + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
> + ICE_FLOW_SEG_HDR_IPV6, 0);
> + if (ret)
> + PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d",
> + __func__, ret);
> + }
> + /**
> + *configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst
> + *configure RSS for udp4 with input set IP src/dst, UDP src/dst
> + */
> + if (rss_hf & ETH_RSS_UDP) {
> + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
> + ICE_FLOW_SEG_HDR_UDP |
> + ICE_FLOW_SEG_HDR_IPV6, 0);
> + if (ret)
> + PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d",
> + __func__, ret);
> +
> + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
> + ICE_FLOW_SEG_HDR_UDP |
> + ICE_FLOW_SEG_HDR_IPV4, 0);
> + if (ret)
> + PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d",
> + __func__, ret);
> + }
> + /**
> + * configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst
> + * configure RSS for tcp4 with input set IP src/dst, TCP src/dst
> + */
> + if (rss_hf & ETH_RSS_TCP) {
> + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
> + ICE_FLOW_SEG_HDR_TCP |
> + ICE_FLOW_SEG_HDR_IPV6, 0);
> + if (ret)
> + PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d",
> + __func__, ret);
> + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
> + ICE_FLOW_SEG_HDR_TCP |
> + ICE_FLOW_SEG_HDR_IPV4, 0);
> + if (ret)
> + PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d",
> + __func__, ret);
> + }
> + /**
> + * configure RSS for sctp6 with input set IPv6 src/dst
> + * configure RSS for sctp4 with input set IP src/dst
> + */
> + if (rss_hf & ETH_RSS_SCTP) {
> + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
> + ICE_FLOW_SEG_HDR_SCTP |
> + ICE_FLOW_SEG_HDR_IPV6, 0);
> + if (ret)
> + PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
> + __func__, ret);
> + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
> + ICE_FLOW_SEG_HDR_SCTP |
> + ICE_FLOW_SEG_HDR_IPV4, 0);
> + if (ret)
> + PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
> + __func__, ret);
> + }
> +}
> +
> static int ice_init_rss(struct ice_pf *pf) {
> struct ice_hw *hw = ICE_PF_TO_HW(pf);
> @@ -2501,72 +2582,9 @@ static int ice_init_rss(struct ice_pf *pf)
> (1 << VSIQF_HASH_CTL_HASH_SCHEME_S);
> ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg);
>
> - /* configure RSS for IPv4 with input set IPv4 src/dst */
> - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
> - ICE_FLOW_SEG_HDR_IPV4, 0);
> - if (ret)
> - PMD_DRV_LOG(ERR, "%s IPV4 rss flow fail %d", __func__, ret);
> -
> - /* configure RSS for IPv6 with input set IPv6 src/dst */
> - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
> - ICE_FLOW_SEG_HDR_IPV6, 0);
> - if (ret)
> - PMD_DRV_LOG(ERR, "%s IPV6 rss flow fail %d", __func__, ret);
> -
> - /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
> - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
> - ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6,
> 0);
> - if (ret)
> - PMD_DRV_LOG(ERR, "%s TCP_IPV6 rss flow fail %d", __func__, ret);
> -
> - /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
> - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
> - ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6,
> 0);
> - if (ret)
> - PMD_DRV_LOG(ERR, "%s UDP_IPV6 rss flow fail %d", __func__, ret);
> -
> - /* configure RSS for sctp6 with input set IPv6 src/dst */
> - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
> - ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6,
> 0);
> - if (ret)
> - PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
> - __func__, ret);
> -
> - /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
> - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
> - ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4,
> 0);
> - if (ret)
> - PMD_DRV_LOG(ERR, "%s TCP_IPV4 rss flow fail %d", __func__, ret);
> -
> - /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
> - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
> - ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4,
> 0);
> - if (ret)
> - PMD_DRV_LOG(ERR, "%s UDP_IPV4 rss flow fail %d", __func__, ret);
> -
> - /* configure RSS for sctp4 with input set IP src/dst */
> - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
> - ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4,
> 0);
> - if (ret)
> - PMD_DRV_LOG(ERR, "%s SCTP_IPV4 rss flow fail %d",
> - __func__, ret);
> -
> - /* configure RSS for gtpu with input set TEID */
> - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_GTP_U_IPV4_TEID,
> - ICE_FLOW_SEG_HDR_GTPU_IP, 0);
> - if (ret)
> - PMD_DRV_LOG(ERR, "%s GTPU_TEID rss flow fail %d",
> - __func__, ret);
>
> - /**
> - * configure RSS for pppoe/pppod with input set
> - * Source MAC and Session ID
> - */
> - ret = ice_add_rss_cfg(hw, vsi->idx,
> ICE_FLOW_HASH_PPPOE_SESS_ID_ETH,
> - ICE_FLOW_SEG_HDR_PPPOE, 0);
> - if (ret)
> - PMD_DRV_LOG(ERR, "%s PPPoE/PPPoD_SessionID rss flow fail %d",
> - __func__, ret);
> + /* RSS hash configuration */
> + ice_rss_hash_set(pf, rss_conf->rss_hf);
>
> return 0;
> }
> @@ -3680,6 +3698,7 @@ ice_rss_hash_update(struct rte_eth_dev *dev, {
> enum ice_status status = ICE_SUCCESS;
> struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> + struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> struct ice_vsi *vsi = pf->main_vsi;
>
> /* set hash key */
> @@ -3687,7 +3706,18 @@ ice_rss_hash_update(struct rte_eth_dev *dev,
> if (status)
> return status;
>
> - /* TODO: hash enable config, ice_add_rss_cfg */
> + if (rss_conf->rss_hf == 0)
> + return -EINVAL;
> +
> + status = ice_rem_vsi_rss_cfg(hw, vsi->idx);
> + if (status != ICE_SUCCESS) {
> + PMD_DRV_LOG(ERR, "Failed to remove rss cfg!");
> + return status;
> + }
No need to remove the existing configuration;
we just need to make sure all the configuration requested in rss_hf has been applied.
> +
> + /* RSS hash configuration */
> + ice_rss_hash_set(pf, rss_conf->rss_hf);
> +
> return 0;
> }
>
> --
> 2.17.1
next prev parent reply other threads:[~2020-06-22 14:36 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-06-10 6:33 [dpdk-dev] [PATCH] " Junyu Jiang
2020-06-22 5:33 ` [dpdk-dev] [PATCH v2] " Junyu Jiang
2020-06-22 14:36 ` Zhang, Qi Z [this message]
2020-06-23 2:47 ` Jiang, JunyuX
2020-07-03 8:16 ` Zhao1, Wei
2020-07-03 8:46 ` Zhao1, Wei
2020-06-23 8:33 ` [dpdk-dev] [PATCH v3] net/ice: initialize and update RSS based on user request Junyu Jiang
2020-06-23 9:40 ` Zhang, Qi Z
2020-06-24 2:09 ` [dpdk-dev] [PATCH v4] " Junyu Jiang
2020-06-24 7:30 ` Zhang, Qi Z
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=039ED4275CED7440929022BC67E7061154842F8A@SHSMSX103.ccr.corp.intel.com \
--to=qi.z.zhang@intel.com \
--cc=dev@dpdk.org \
--cc=junyux.jiang@intel.com \
--cc=qiming.yang@intel.com \
--cc=simei.su@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).