* [dpdk-dev] [PATCH] net/ice: fix GTPU down/uplink and extension conflict @ 2020-07-24 2:10 Simei Su 2020-07-24 7:13 ` Jeff Guo 2020-07-24 14:41 ` [dpdk-dev] [PATCH v2] " Simei Su 0 siblings, 2 replies; 8+ messages in thread From: Simei Su @ 2020-07-24 2:10 UTC (permalink / raw) To: qi.z.zhang, beilei.xing; +Cc: dev, jia.guo, Simei Su When adding a RSS rule with GTPU_DWN/UP, it will search profile table from the top index. If a RSS rule with GTPU_EH already exists, then GTPU_DWN/UP packet will match GTPU_EH profile. This patch solves this issue by removing existed GTPU_EH rule before creating a new GTPU_DWN/UP rule. Fixes: 2e2810fc1868 ("net/ice: fix GTPU RSS") Signed-off-by: Simei Su <simei.su@intel.com> --- drivers/net/ice/ice_ethdev.c | 47 +++++++++++++++ drivers/net/ice/ice_ethdev.h | 15 +++++ drivers/net/ice/ice_hash.c | 139 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 201 insertions(+) diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c index a4a0390..8839146 100644 --- a/drivers/net/ice/ice_ethdev.c +++ b/drivers/net/ice/ice_ethdev.c @@ -2538,6 +2538,11 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) if (ret) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4 rss flow fail %d", __func__, ret); + /* Store hash field and header for gtpu_eh ipv4 */ + pf->gtpu_eh.ipv4.hash_fld = ICE_FLOW_HASH_IPV4; + pf->gtpu_eh.ipv4.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER; ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_PPPOE | @@ -2564,6 +2569,11 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) if (ret) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6 rss flow fail %d", __func__, ret); + /* Store hash field and header for gtpu_eh ipv6 */ + pf->gtpu_eh.ipv6.hash_fld = ICE_FLOW_HASH_IPV6; + pf->gtpu_eh.ipv6.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER; ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_PPPOE | @@ 
-2586,6 +2596,9 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) if (ret) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_UDP rss flow fail %d", __func__, ret); + /* Store hash field and header for gtpu_eh ipv4_udp */ + pf->gtpu_eh.ipv4_udp.hash_fld = ICE_HASH_UDP_IPV4; + pf->gtpu_eh.ipv4_udp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH; ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4, ICE_FLOW_SEG_HDR_PPPOE, 0); @@ -2606,6 +2619,9 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) if (ret) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_UDP rss flow fail %d", __func__, ret); + /* Store hash field and header for gtpu_eh ipv6_udp */ + pf->gtpu_eh.ipv6_udp.hash_fld = ICE_HASH_UDP_IPV6; + pf->gtpu_eh.ipv6_udp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH; ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6, ICE_FLOW_SEG_HDR_PPPOE, 0); @@ -2626,6 +2642,9 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) if (ret) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_TCP rss flow fail %d", __func__, ret); + /* Store hash field and header for gtpu_eh ipv4_tcp */ + pf->gtpu_eh.ipv4_tcp.hash_fld = ICE_HASH_TCP_IPV4; + pf->gtpu_eh.ipv4_tcp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH; ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4, ICE_FLOW_SEG_HDR_PPPOE, 0); @@ -2646,6 +2665,9 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) if (ret) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_TCP rss flow fail %d", __func__, ret); + /* Store hash field and header for gtpu_eh ipv6_tcp */ + pf->gtpu_eh.ipv6_tcp.hash_fld = ICE_HASH_TCP_IPV6; + pf->gtpu_eh.ipv6_tcp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH; ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6, ICE_FLOW_SEG_HDR_PPPOE, 0); @@ -2695,6 +2717,28 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) } } +static void +ice_rss_ctx_init(struct ice_pf *pf) +{ + pf->gtpu_eh.ipv4.hash_fld = 0; + pf->gtpu_eh.ipv4.pkt_hdr = 0; + + pf->gtpu_eh.ipv6.hash_fld = 0; + pf->gtpu_eh.ipv6.pkt_hdr = 0; + + pf->gtpu_eh.ipv4_udp.hash_fld = 0; + pf->gtpu_eh.ipv4_udp.pkt_hdr = 0; + + 
pf->gtpu_eh.ipv6_udp.hash_fld = 0; + pf->gtpu_eh.ipv6_udp.pkt_hdr = 0; + + pf->gtpu_eh.ipv4_tcp.hash_fld = 0; + pf->gtpu_eh.ipv4_tcp.pkt_hdr = 0; + + pf->gtpu_eh.ipv6_tcp.hash_fld = 0; + pf->gtpu_eh.ipv6_tcp.pkt_hdr = 0; +} + static int ice_init_rss(struct ice_pf *pf) { struct ice_hw *hw = ICE_PF_TO_HW(pf); @@ -2755,6 +2799,9 @@ static int ice_init_rss(struct ice_pf *pf) (1 << VSIQF_HASH_CTL_HASH_SCHEME_S); ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg); + /* Initialize RSS context for gtpu_eh */ + ice_rss_ctx_init(pf); + /* RSS hash configuration */ ice_rss_hash_set(pf, rss_conf->rss_hf); diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h index 87984ef..1baf0b4 100644 --- a/drivers/net/ice/ice_ethdev.h +++ b/drivers/net/ice/ice_ethdev.h @@ -358,6 +358,20 @@ struct ice_fdir_info { struct ice_fdir_counter_pool_container counter; }; +struct ice_gtpu_eh { + uint32_t pkt_hdr; + uint64_t hash_fld; +}; + +struct ice_hash_gtpu_eh { + struct ice_gtpu_eh ipv4; + struct ice_gtpu_eh ipv6; + struct ice_gtpu_eh ipv4_udp; + struct ice_gtpu_eh ipv6_udp; + struct ice_gtpu_eh ipv4_tcp; + struct ice_gtpu_eh ipv6_tcp; +}; + struct ice_pf { struct ice_adapter *adapter; /* The adapter this PF associate to */ struct ice_vsi *main_vsi; /* pointer to main VSI structure */ @@ -381,6 +395,7 @@ struct ice_pf { uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */ uint16_t fdir_qp_offset; struct ice_fdir_info fdir; /* flow director info */ + struct ice_hash_gtpu_eh gtpu_eh; uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; struct ice_hw_port_stats stats_offset; diff --git a/drivers/net/ice/ice_hash.c b/drivers/net/ice/ice_hash.c index e535e4b..dd70353 100644 --- a/drivers/net/ice/ice_hash.c +++ b/drivers/net/ice/ice_hash.c @@ -1232,6 +1232,117 @@ struct ice_hash_match_type ice_hash_type_list[] = { } static int +ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr, uint64_t fld) 
+{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_vsi *vsi = pf->main_vsi; + int ret; + + /** + * If header field contains GTPU_EH, store gtpu_eh context. + * If header field contains GTPU_DWN/UP, remove existed gtpu_eh. + */ + if ((hdr & ICE_FLOW_SEG_HDR_GTPU_EH) && + (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN | + ICE_FLOW_SEG_HDR_GTPU_UP)) == 0) { + if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + pf->gtpu_eh.ipv4_udp.pkt_hdr = hdr; + pf->gtpu_eh.ipv4_udp.hash_fld = fld; + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + pf->gtpu_eh.ipv6_udp.pkt_hdr = hdr; + pf->gtpu_eh.ipv6_udp.hash_fld = fld; + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + pf->gtpu_eh.ipv4_tcp.pkt_hdr = hdr; + pf->gtpu_eh.ipv4_tcp.hash_fld = fld; + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + pf->gtpu_eh.ipv6_tcp.pkt_hdr = hdr; + pf->gtpu_eh.ipv6_tcp.hash_fld = fld; + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & (ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_TCP)) == 0) { + pf->gtpu_eh.ipv4.pkt_hdr = hdr; + pf->gtpu_eh.ipv4.hash_fld = fld; + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & (ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_TCP)) == 0) { + pf->gtpu_eh.ipv6.pkt_hdr = hdr; + pf->gtpu_eh.ipv6.hash_fld = fld; + } + } else if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN | + ICE_FLOW_SEG_HDR_GTPU_UP)) { + if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + if (pf->gtpu_eh.ipv4_udp.hash_fld && + pf->gtpu_eh.ipv4_udp.pkt_hdr) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_eh.ipv4_udp.hash_fld, + pf->gtpu_eh.ipv4_udp.pkt_hdr); + if (ret) + return -rte_errno; + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + if (pf->gtpu_eh.ipv6_udp.hash_fld && + pf->gtpu_eh.ipv6_udp.pkt_hdr) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_eh.ipv6_udp.hash_fld, + pf->gtpu_eh.ipv6_udp.pkt_hdr); + if (ret) + return -rte_errno; + } + } 
else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + if (pf->gtpu_eh.ipv4_tcp.hash_fld && + pf->gtpu_eh.ipv4_tcp.pkt_hdr) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_eh.ipv4_tcp.hash_fld, + pf->gtpu_eh.ipv4_tcp.pkt_hdr); + if (ret) + return -rte_errno; + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + if (pf->gtpu_eh.ipv6_tcp.hash_fld && + pf->gtpu_eh.ipv6_tcp.pkt_hdr) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_eh.ipv6_tcp.hash_fld, + pf->gtpu_eh.ipv6_tcp.pkt_hdr); + if (ret) + return -rte_errno; + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & (ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_TCP)) == 0) { + if (pf->gtpu_eh.ipv4.hash_fld && + pf->gtpu_eh.ipv4.pkt_hdr) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_eh.ipv4.hash_fld, + pf->gtpu_eh.ipv4.pkt_hdr); + if (ret) + return -rte_errno; + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & (ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_TCP)) == 0) { + if (pf->gtpu_eh.ipv6.hash_fld && + pf->gtpu_eh.ipv6.pkt_hdr) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_eh.ipv6.hash_fld, + pf->gtpu_eh.ipv6.pkt_hdr); + if (ret) + return -rte_errno; + } + } + } + + return 0; +} + +static int ice_hash_create(struct ice_adapter *ad, struct rte_flow *flow, void *meta, @@ -1248,6 +1359,10 @@ struct ice_hash_match_type ice_hash_type_list[] = { uint64_t hash_field = ((struct rss_meta *)meta)->hash_flds; uint8_t hash_function = ((struct rss_meta *)meta)->hash_function; + ret = ice_add_rss_cfg_pre(pf, headermask, hash_field); + if (ret) + return -rte_errno; + filter_ptr = rte_zmalloc("ice_rss_filter", sizeof(struct ice_hash_flow_cfg), 0); if (!filter_ptr) { @@ -1297,6 +1412,28 @@ struct ice_hash_match_type ice_hash_type_list[] = { return -rte_errno; } +static void +ice_rem_rss_cfg_post(struct ice_pf *pf) +{ + pf->gtpu_eh.ipv4.hash_fld = 0; + pf->gtpu_eh.ipv4.pkt_hdr = 0; + + pf->gtpu_eh.ipv6.hash_fld = 0; + pf->gtpu_eh.ipv6.pkt_hdr = 0; + + 
pf->gtpu_eh.ipv4_udp.hash_fld = 0; + pf->gtpu_eh.ipv4_udp.pkt_hdr = 0; + + pf->gtpu_eh.ipv6_udp.hash_fld = 0; + pf->gtpu_eh.ipv6_udp.pkt_hdr = 0; + + pf->gtpu_eh.ipv4_tcp.hash_fld = 0; + pf->gtpu_eh.ipv4_tcp.pkt_hdr = 0; + + pf->gtpu_eh.ipv6_tcp.hash_fld = 0; + pf->gtpu_eh.ipv6_tcp.pkt_hdr = 0; +} + static int ice_hash_destroy(struct ice_adapter *ad, struct rte_flow *flow, @@ -1334,6 +1471,8 @@ struct ice_hash_match_type ice_hash_type_list[] = { } } + ice_rem_rss_cfg_post(pf); + rte_free(filter_ptr); return 0; -- 1.8.3.1 ^ permalink raw reply [flat|nested] 8+ messages in thread
Suggest interpreting the relationship between GTPU_EH_UPLINK/DWNLINK and GTPU_EH to help explain the reason.
> + /* Store hash field and header for gtpu_eh ipv4 */ > + pf->gtpu_eh.ipv4.hash_fld = ICE_FLOW_HASH_IPV4; > + pf->gtpu_eh.ipv4.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH | > + ICE_FLOW_SEG_HDR_IPV4 | > + ICE_FLOW_SEG_HDR_IPV_OTHER; > > ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4, > ICE_FLOW_SEG_HDR_PPPOE | > @@ -2564,6 +2569,11 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) > if (ret) > PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6 rss flow fail %d", > __func__, ret); > + /* Store hash field and header for gtpu_eh ipv6 */ > + pf->gtpu_eh.ipv6.hash_fld = ICE_FLOW_HASH_IPV6; > + pf->gtpu_eh.ipv6.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH | > + ICE_FLOW_SEG_HDR_IPV6 | > + ICE_FLOW_SEG_HDR_IPV_OTHER; > > ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6, > ICE_FLOW_SEG_HDR_PPPOE | > @@ -2586,6 +2596,9 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) > if (ret) > PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_UDP rss flow fail %d", > __func__, ret); > + /* Store hash field and header for gtpu_eh ipv4_udp */ > + pf->gtpu_eh.ipv4_udp.hash_fld = ICE_HASH_UDP_IPV4; > + pf->gtpu_eh.ipv4_udp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH; > > ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4, > ICE_FLOW_SEG_HDR_PPPOE, 0); > @@ -2606,6 +2619,9 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) > if (ret) > PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_UDP rss flow fail %d", > __func__, ret); > + /* Store hash field and header for gtpu_eh ipv6_udp */ > + pf->gtpu_eh.ipv6_udp.hash_fld = ICE_HASH_UDP_IPV6; > + pf->gtpu_eh.ipv6_udp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH; > > ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6, > ICE_FLOW_SEG_HDR_PPPOE, 0); > @@ -2626,6 +2642,9 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) > if (ret) > PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_TCP rss flow fail %d", > __func__, ret); > + /* Store hash field and header for gtpu_eh ipv4_tcp */ > + pf->gtpu_eh.ipv4_tcp.hash_fld = ICE_HASH_TCP_IPV4; > + pf->gtpu_eh.ipv4_tcp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH; > > ret = 
ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4, > ICE_FLOW_SEG_HDR_PPPOE, 0); > @@ -2646,6 +2665,9 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) > if (ret) > PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_TCP rss flow fail %d", > __func__, ret); > + /* Store hash field and header for gtpu_eh ipv6_tcp */ > + pf->gtpu_eh.ipv6_tcp.hash_fld = ICE_HASH_TCP_IPV6; > + pf->gtpu_eh.ipv6_tcp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH; > > ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6, > ICE_FLOW_SEG_HDR_PPPOE, 0); > @@ -2695,6 +2717,28 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) > } > } > > +static void > +ice_rss_ctx_init(struct ice_pf *pf) > +{ > + pf->gtpu_eh.ipv4.hash_fld = 0; > + pf->gtpu_eh.ipv4.pkt_hdr = 0; > + > + pf->gtpu_eh.ipv6.hash_fld = 0; > + pf->gtpu_eh.ipv6.pkt_hdr = 0; > + > + pf->gtpu_eh.ipv4_udp.hash_fld = 0; > + pf->gtpu_eh.ipv4_udp.pkt_hdr = 0; > + > + pf->gtpu_eh.ipv6_udp.hash_fld = 0; > + pf->gtpu_eh.ipv6_udp.pkt_hdr = 0; > + > + pf->gtpu_eh.ipv4_tcp.hash_fld = 0; > + pf->gtpu_eh.ipv4_tcp.pkt_hdr = 0; > + > + pf->gtpu_eh.ipv6_tcp.hash_fld = 0; > + pf->gtpu_eh.ipv6_tcp.pkt_hdr = 0; > +} > + > static int ice_init_rss(struct ice_pf *pf) > { > struct ice_hw *hw = ICE_PF_TO_HW(pf); > @@ -2755,6 +2799,9 @@ static int ice_init_rss(struct ice_pf *pf) > (1 << VSIQF_HASH_CTL_HASH_SCHEME_S); > ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg); > > + /* Initialize RSS context for gtpu_eh */ > + ice_rss_ctx_init(pf); > + > /* RSS hash configuration */ > ice_rss_hash_set(pf, rss_conf->rss_hf); > > diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h > index 87984ef..1baf0b4 100644 > --- a/drivers/net/ice/ice_ethdev.h > +++ b/drivers/net/ice/ice_ethdev.h > @@ -358,6 +358,20 @@ struct ice_fdir_info { > struct ice_fdir_counter_pool_container counter; > }; > > +struct ice_gtpu_eh { > + uint32_t pkt_hdr; > + uint64_t hash_fld; > +}; > + The naming "ice_gtpu_eh" is not clear, ice_hash_gtpu_eh_ctx? 
I think you don't need to define a struct for each pattern and set/unset values for all of them; what you consider is just 3 items — the hdr, the hash field and the pattern type — so you could just define it as below
No need to check !=DWN/UP here; EH/DWN/UP are mutually exclusive, and they are also handled when parsing the pattern.
> + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && > + (hdr & ICE_FLOW_SEG_HDR_UDP)) { > + if (pf->gtpu_eh.ipv6_udp.hash_fld && > + pf->gtpu_eh.ipv6_udp.pkt_hdr) { > + ret = ice_rem_rss_cfg(hw, vsi->idx, > + pf->gtpu_eh.ipv6_udp.hash_fld, > + pf->gtpu_eh.ipv6_udp.pkt_hdr); > + if (ret) > + return -rte_errno; > + } > + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && > + (hdr & ICE_FLOW_SEG_HDR_TCP)) { > + if (pf->gtpu_eh.ipv4_tcp.hash_fld && > + pf->gtpu_eh.ipv4_tcp.pkt_hdr) { > + ret = ice_rem_rss_cfg(hw, vsi->idx, > + pf->gtpu_eh.ipv4_tcp.hash_fld, > + pf->gtpu_eh.ipv4_tcp.pkt_hdr); > + if (ret) > + return -rte_errno; > + } > + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && > + (hdr & ICE_FLOW_SEG_HDR_TCP)) { > + if (pf->gtpu_eh.ipv6_tcp.hash_fld && > + pf->gtpu_eh.ipv6_tcp.pkt_hdr) { > + ret = ice_rem_rss_cfg(hw, vsi->idx, > + pf->gtpu_eh.ipv6_tcp.hash_fld, > + pf->gtpu_eh.ipv6_tcp.pkt_hdr); > + if (ret) > + return -rte_errno; > + } > + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && > + (hdr & (ICE_FLOW_SEG_HDR_UDP | > + ICE_FLOW_SEG_HDR_TCP)) == 0) { > + if (pf->gtpu_eh.ipv4.hash_fld && > + pf->gtpu_eh.ipv4.pkt_hdr) { > + ret = ice_rem_rss_cfg(hw, vsi->idx, > + pf->gtpu_eh.ipv4.hash_fld, > + pf->gtpu_eh.ipv4.pkt_hdr); > + if (ret) > + return -rte_errno; > + } > + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && > + (hdr & (ICE_FLOW_SEG_HDR_UDP | > + ICE_FLOW_SEG_HDR_TCP)) == 0) { > + if (pf->gtpu_eh.ipv6.hash_fld && > + pf->gtpu_eh.ipv6.pkt_hdr) { > + ret = ice_rem_rss_cfg(hw, vsi->idx, > + pf->gtpu_eh.ipv6.hash_fld, > + pf->gtpu_eh.ipv6.pkt_hdr); > + if (ret) > + return -rte_errno; > + } > + } > + } > + > + return 0; > +} > + > +static int > ice_hash_create(struct ice_adapter *ad, > struct rte_flow *flow, > void *meta, > @@ -1248,6 +1359,10 @@ struct ice_hash_match_type ice_hash_type_list[] = { > uint64_t hash_field = ((struct rss_meta *)meta)->hash_flds; > uint8_t hash_function = ((struct rss_meta *)meta)->hash_function; > > + ret = ice_add_rss_cfg_pre(pf, headermask, 
hash_field); > + if (ret) > + return -rte_errno; > + > filter_ptr = rte_zmalloc("ice_rss_filter", > sizeof(struct ice_hash_flow_cfg), 0); > if (!filter_ptr) { > @@ -1297,6 +1412,28 @@ struct ice_hash_match_type ice_hash_type_list[] = { > return -rte_errno; > } > > +static void > +ice_rem_rss_cfg_post(struct ice_pf *pf) > +{ > + pf->gtpu_eh.ipv4.hash_fld = 0; > + pf->gtpu_eh.ipv4.pkt_hdr = 0; > + > + pf->gtpu_eh.ipv6.hash_fld = 0; > + pf->gtpu_eh.ipv6.pkt_hdr = 0; > + > + pf->gtpu_eh.ipv4_udp.hash_fld = 0; > + pf->gtpu_eh.ipv4_udp.pkt_hdr = 0; > + > + pf->gtpu_eh.ipv6_udp.hash_fld = 0; > + pf->gtpu_eh.ipv6_udp.pkt_hdr = 0; > + > + pf->gtpu_eh.ipv4_tcp.hash_fld = 0; > + pf->gtpu_eh.ipv4_tcp.pkt_hdr = 0; > + > + pf->gtpu_eh.ipv6_tcp.hash_fld = 0; > + pf->gtpu_eh.ipv6_tcp.pkt_hdr = 0; > +} > + > static int > ice_hash_destroy(struct ice_adapter *ad, > struct rte_flow *flow, > @@ -1334,6 +1471,8 @@ struct ice_hash_match_type ice_hash_type_list[] = { > } > } > > + ice_rem_rss_cfg_post(pf); > + > rte_free(filter_ptr); > return 0; > ^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [dpdk-dev] [PATCH] net/ice: fix GTPU down/uplink and extension conflict 2020-07-24 7:13 ` Jeff Guo @ 2020-07-24 14:19 ` Su, Simei 0 siblings, 0 replies; 8+ messages in thread From: Su, Simei @ 2020-07-24 14:19 UTC (permalink / raw) To: Guo, Jia, Zhang, Qi Z, Xing, Beilei; +Cc: dev Hi, Guojia > -----Original Message----- > From: Guo, Jia <jia.guo@intel.com> > Sent: Friday, July 24, 2020 3:13 PM > To: Su, Simei <simei.su@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com>; Xing, > Beilei <beilei.xing@intel.com> > Cc: dev@dpdk.org > Subject: Re: [PATCH] net/ice: fix GTPU down/uplink and extension conflict > > hi, simei > > On 7/24/2020 10:10 AM, Simei Su wrote: > > When adding a RSS rule with GTPU_DWN/UP, it will search profile table > > from the top index. If a RSS rule with GTPU_EH already exists, then > > GTPU_DWN/UP packet will match GTPU_EH profile. This patch solves this > > issue by removing existed GTPU_EH rule before creating a new > > GTPU_DWN/UP rule. > > > Suggest interpret the relation ship bettween GTPU_EH_UPLINK/DWNLINK with > GTPU_EH to help knowledge the reason. Ok, I will refine the commit message. > > > > Fixes: 2e2810fc1868 ("net/ice: fix GTPU RSS") > > > > Signed-off-by: Simei Su <simei.su@intel.com> > > --- > > drivers/net/ice/ice_ethdev.c | 47 +++++++++++++++ > > drivers/net/ice/ice_ethdev.h | 15 +++++ > > drivers/net/ice/ice_hash.c | 139 > +++++++++++++++++++++++++++++++++++++++++++ > > 3 files changed, 201 insertions(+) > > > > diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c > > index a4a0390..8839146 100644 > > --- a/drivers/net/ice/ice_ethdev.c > > +++ b/drivers/net/ice/ice_ethdev.c > > @@ -2538,6 +2538,11 @@ static int ice_parse_devargs(struct rte_eth_dev > *dev) > > if (ret) > > PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4 rss flow fail %d", > > __func__, ret); > > > A blank line need. Ok. 
> > > > + /* Store hash field and header for gtpu_eh ipv4 */ > > + pf->gtpu_eh.ipv4.hash_fld = ICE_FLOW_HASH_IPV4; > > + pf->gtpu_eh.ipv4.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH | > > + ICE_FLOW_SEG_HDR_IPV4 | > > + ICE_FLOW_SEG_HDR_IPV_OTHER; > > > > ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4, > > ICE_FLOW_SEG_HDR_PPPOE | > > @@ -2564,6 +2569,11 @@ static int ice_parse_devargs(struct rte_eth_dev > *dev) > > if (ret) > > PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6 rss flow fail %d", > > __func__, ret); > > + /* Store hash field and header for gtpu_eh ipv6 */ > > + pf->gtpu_eh.ipv6.hash_fld = ICE_FLOW_HASH_IPV6; > > + pf->gtpu_eh.ipv6.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH | > > + ICE_FLOW_SEG_HDR_IPV6 | > > + ICE_FLOW_SEG_HDR_IPV_OTHER; > > > > ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6, > > ICE_FLOW_SEG_HDR_PPPOE | > > @@ -2586,6 +2596,9 @@ static int ice_parse_devargs(struct rte_eth_dev > *dev) > > if (ret) > > PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_UDP rss flow fail %d", > > __func__, ret); > > + /* Store hash field and header for gtpu_eh ipv4_udp */ > > + pf->gtpu_eh.ipv4_udp.hash_fld = ICE_HASH_UDP_IPV4; > > + pf->gtpu_eh.ipv4_udp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH; > > > > ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4, > > ICE_FLOW_SEG_HDR_PPPOE, 0); > > @@ -2606,6 +2619,9 @@ static int ice_parse_devargs(struct rte_eth_dev > *dev) > > if (ret) > > PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_UDP rss flow fail %d", > > __func__, ret); > > + /* Store hash field and header for gtpu_eh ipv6_udp */ > > + pf->gtpu_eh.ipv6_udp.hash_fld = ICE_HASH_UDP_IPV6; > > + pf->gtpu_eh.ipv6_udp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH; > > > > ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6, > > ICE_FLOW_SEG_HDR_PPPOE, 0); > > @@ -2626,6 +2642,9 @@ static int ice_parse_devargs(struct rte_eth_dev > *dev) > > if (ret) > > PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_TCP rss flow fail %d", > > __func__, ret); > > + /* Store hash field and header for gtpu_eh ipv4_tcp */ > > + 
pf->gtpu_eh.ipv4_tcp.hash_fld = ICE_HASH_TCP_IPV4; > > + pf->gtpu_eh.ipv4_tcp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH; > > > > ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4, > > ICE_FLOW_SEG_HDR_PPPOE, 0); > > @@ -2646,6 +2665,9 @@ static int ice_parse_devargs(struct rte_eth_dev > *dev) > > if (ret) > > PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_TCP rss flow fail %d", > > __func__, ret); > > + /* Store hash field and header for gtpu_eh ipv6_tcp */ > > + pf->gtpu_eh.ipv6_tcp.hash_fld = ICE_HASH_TCP_IPV6; > > + pf->gtpu_eh.ipv6_tcp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH; > > > > ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6, > > ICE_FLOW_SEG_HDR_PPPOE, 0); > > @@ -2695,6 +2717,28 @@ static int ice_parse_devargs(struct rte_eth_dev > *dev) > > } > > } > > > > +static void > > +ice_rss_ctx_init(struct ice_pf *pf) > > +{ > > + pf->gtpu_eh.ipv4.hash_fld = 0; > > + pf->gtpu_eh.ipv4.pkt_hdr = 0; > > + > > + pf->gtpu_eh.ipv6.hash_fld = 0; > > + pf->gtpu_eh.ipv6.pkt_hdr = 0; > > + > > + pf->gtpu_eh.ipv4_udp.hash_fld = 0; > > + pf->gtpu_eh.ipv4_udp.pkt_hdr = 0; > > + > > + pf->gtpu_eh.ipv6_udp.hash_fld = 0; > > + pf->gtpu_eh.ipv6_udp.pkt_hdr = 0; > > + > > + pf->gtpu_eh.ipv4_tcp.hash_fld = 0; > > + pf->gtpu_eh.ipv4_tcp.pkt_hdr = 0; > > + > > + pf->gtpu_eh.ipv6_tcp.hash_fld = 0; > > + pf->gtpu_eh.ipv6_tcp.pkt_hdr = 0; > > +} > > + > > static int ice_init_rss(struct ice_pf *pf) > > { > > struct ice_hw *hw = ICE_PF_TO_HW(pf); > > @@ -2755,6 +2799,9 @@ static int ice_init_rss(struct ice_pf *pf) > > (1 << VSIQF_HASH_CTL_HASH_SCHEME_S); > > ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg); > > > > + /* Initialize RSS context for gtpu_eh */ > > + ice_rss_ctx_init(pf); > > + > > /* RSS hash configuration */ > > ice_rss_hash_set(pf, rss_conf->rss_hf); > > > > diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h > > index 87984ef..1baf0b4 100644 > > --- a/drivers/net/ice/ice_ethdev.h > > +++ b/drivers/net/ice/ice_ethdev.h > > @@ -358,6 +358,20 @@ struct 
Because we should also remember the default rule context, we still need to define a structure for each pattern.
> > > struct ice_pf { > > struct ice_adapter *adapter; /* The adapter this PF associate to */ > > struct ice_vsi *main_vsi; /* pointer to main VSI structure */ > > @@ -381,6 +395,7 @@ struct ice_pf { > > uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */ > > uint16_t fdir_qp_offset; > > struct ice_fdir_info fdir; /* flow director info */ > > + struct ice_hash_gtpu_eh gtpu_eh; > > uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; > > uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; > > struct ice_hw_port_stats stats_offset; > > diff --git a/drivers/net/ice/ice_hash.c b/drivers/net/ice/ice_hash.c > > index e535e4b..dd70353 100644 > > --- a/drivers/net/ice/ice_hash.c > > +++ b/drivers/net/ice/ice_hash.c > > @@ -1232,6 +1232,117 @@ struct ice_hash_match_type > ice_hash_type_list[] = { > > } > > > > static int > > +ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr, uint64_t fld) > > +{ > > + struct ice_hw *hw = ICE_PF_TO_HW(pf); > > + struct ice_vsi *vsi = pf->main_vsi; > > + int ret; > > + > > + /** > > + * If header field contains GTPU_EH, store gtpu_eh context. > > + * If header field contains GTPU_DWN/UP, remove existed gtpu_eh. > > + */ > > + if ((hdr & ICE_FLOW_SEG_HDR_GTPU_EH) && > > + (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN | > > + ICE_FLOW_SEG_HDR_GTPU_UP)) == 0) { > > > No need to check !=DWN/UP here, EH/DWN/UP are mutual exclusion, they > also handler when parse pattern. Ok. I thought EH and DWN/UP could co-exist. > > > > + if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && > > + (hdr & ICE_FLOW_SEG_HDR_UDP)) { > > > Alignment should match open parenthesis. Below is the same. OK. 
Yes, you are right. I will clean up the code in the next version. Thanks.
Br Simei > > > > + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && > > + (hdr & ICE_FLOW_SEG_HDR_UDP)) { > > + if (pf->gtpu_eh.ipv6_udp.hash_fld && > > + pf->gtpu_eh.ipv6_udp.pkt_hdr) { > > + ret = ice_rem_rss_cfg(hw, vsi->idx, > > + pf->gtpu_eh.ipv6_udp.hash_fld, > > + pf->gtpu_eh.ipv6_udp.pkt_hdr); > > + if (ret) > > + return -rte_errno; > > + } > > + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && > > + (hdr & ICE_FLOW_SEG_HDR_TCP)) { > > + if (pf->gtpu_eh.ipv4_tcp.hash_fld && > > + pf->gtpu_eh.ipv4_tcp.pkt_hdr) { > > + ret = ice_rem_rss_cfg(hw, vsi->idx, > > + pf->gtpu_eh.ipv4_tcp.hash_fld, > > + pf->gtpu_eh.ipv4_tcp.pkt_hdr); > > + if (ret) > > + return -rte_errno; > > + } > > + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && > > + (hdr & ICE_FLOW_SEG_HDR_TCP)) { > > + if (pf->gtpu_eh.ipv6_tcp.hash_fld && > > + pf->gtpu_eh.ipv6_tcp.pkt_hdr) { > > + ret = ice_rem_rss_cfg(hw, vsi->idx, > > + pf->gtpu_eh.ipv6_tcp.hash_fld, > > + pf->gtpu_eh.ipv6_tcp.pkt_hdr); > > + if (ret) > > + return -rte_errno; > > + } > > + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && > > + (hdr & (ICE_FLOW_SEG_HDR_UDP | > > + ICE_FLOW_SEG_HDR_TCP)) == 0) { > > + if (pf->gtpu_eh.ipv4.hash_fld && > > + pf->gtpu_eh.ipv4.pkt_hdr) { > > + ret = ice_rem_rss_cfg(hw, vsi->idx, > > + pf->gtpu_eh.ipv4.hash_fld, > > + pf->gtpu_eh.ipv4.pkt_hdr); > > + if (ret) > > + return -rte_errno; > > + } > > + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && > > + (hdr & (ICE_FLOW_SEG_HDR_UDP | > > + ICE_FLOW_SEG_HDR_TCP)) == 0) { > > + if (pf->gtpu_eh.ipv6.hash_fld && > > + pf->gtpu_eh.ipv6.pkt_hdr) { > > + ret = ice_rem_rss_cfg(hw, vsi->idx, > > + pf->gtpu_eh.ipv6.hash_fld, > > + pf->gtpu_eh.ipv6.pkt_hdr); > > + if (ret) > > + return -rte_errno; > > + } > > + } > > + } > > + > > + return 0; > > +} > > + > > +static int > > ice_hash_create(struct ice_adapter *ad, > > struct rte_flow *flow, > > void *meta, > > @@ -1248,6 +1359,10 @@ struct ice_hash_match_type ice_hash_type_list[] > = { > > uint64_t hash_field = ((struct rss_meta 
*)meta)->hash_flds; > > uint8_t hash_function = ((struct rss_meta *)meta)->hash_function; > > > > + ret = ice_add_rss_cfg_pre(pf, headermask, hash_field); > > + if (ret) > > + return -rte_errno; > > + > > filter_ptr = rte_zmalloc("ice_rss_filter", > > sizeof(struct ice_hash_flow_cfg), 0); > > if (!filter_ptr) { > > @@ -1297,6 +1412,28 @@ struct ice_hash_match_type ice_hash_type_list[] > = { > > return -rte_errno; > > } > > > > +static void > > +ice_rem_rss_cfg_post(struct ice_pf *pf) > > +{ > > + pf->gtpu_eh.ipv4.hash_fld = 0; > > + pf->gtpu_eh.ipv4.pkt_hdr = 0; > > + > > + pf->gtpu_eh.ipv6.hash_fld = 0; > > + pf->gtpu_eh.ipv6.pkt_hdr = 0; > > + > > + pf->gtpu_eh.ipv4_udp.hash_fld = 0; > > + pf->gtpu_eh.ipv4_udp.pkt_hdr = 0; > > + > > + pf->gtpu_eh.ipv6_udp.hash_fld = 0; > > + pf->gtpu_eh.ipv6_udp.pkt_hdr = 0; > > + > > + pf->gtpu_eh.ipv4_tcp.hash_fld = 0; > > + pf->gtpu_eh.ipv4_tcp.pkt_hdr = 0; > > + > > + pf->gtpu_eh.ipv6_tcp.hash_fld = 0; > > + pf->gtpu_eh.ipv6_tcp.pkt_hdr = 0; > > +} > > + > > static int > > ice_hash_destroy(struct ice_adapter *ad, > > struct rte_flow *flow, > > @@ -1334,6 +1471,8 @@ struct ice_hash_match_type ice_hash_type_list[] > = { > > } > > } > > > > + ice_rem_rss_cfg_post(pf); > > + > > rte_free(filter_ptr); > > return 0; > > ^ permalink raw reply [flat|nested] 8+ messages in thread
* [dpdk-dev] [PATCH v2] net/ice: fix GTPU down/uplink and extension conflict 2020-07-24 2:10 [dpdk-dev] [PATCH] net/ice: fix GTPU down/uplink and extension conflict Simei Su 2020-07-24 7:13 ` Jeff Guo @ 2020-07-24 14:41 ` Simei Su 2020-07-26 3:13 ` [dpdk-dev] [PATCH v3] " Simei Su 1 sibling, 1 reply; 8+ messages in thread From: Simei Su @ 2020-07-24 14:41 UTC (permalink / raw) To: qi.z.zhang, beilei.xing; +Cc: dev, jia.guo, Simei Su When adding an RSS rule with GTPU_DWN/UP, it will write the profile from top to bottom due to a firmware limitation. If an RSS rule with GTPU_EH already exists, then a GTPU_DWN/UP packet will match the GTPU_EH profile. This patch solves this issue by remembering a gtpu_eh RSS configuration and removing it before the corresponding RSS configuration for the downlink/uplink rule is issued. Fixes: 2e2810fc1868 ("net/ice: fix GTPU RSS") Signed-off-by: Simei Su <simei.su@intel.com> --- v2: * Refine commit message. * Clean code. * Fix issue where gtpu down/uplink rules can't be issued simultaneously. * Fix issue where gtpu down/uplink ipv4_udp/tcp or ipv6_udp/tcp symmetric rules don't take effect. 
--- drivers/net/ice/ice_ethdev.c | 53 ++++++++++++ drivers/net/ice/ice_ethdev.h | 15 ++++ drivers/net/ice/ice_hash.c | 201 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 269 insertions(+) diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c index a4a0390..84dbf6b 100644 --- a/drivers/net/ice/ice_ethdev.c +++ b/drivers/net/ice/ice_ethdev.c @@ -2539,6 +2539,12 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4 rss flow fail %d", __func__, ret); + /* Store hash field and header for gtpu_eh ipv4 */ + pf->gtpu_eh.ipv4.hash_fld = ICE_FLOW_HASH_IPV4; + pf->gtpu_eh.ipv4.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER; + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 | @@ -2565,6 +2571,12 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6 rss flow fail %d", __func__, ret); + /* Store hash field and header for gtpu_eh ipv6 */ + pf->gtpu_eh.ipv6.hash_fld = ICE_FLOW_HASH_IPV6; + pf->gtpu_eh.ipv6.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER; + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 | @@ -2587,6 +2599,10 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_UDP rss flow fail %d", __func__, ret); + /* Store hash field and header for gtpu_eh ipv4_udp */ + pf->gtpu_eh.ipv4_udp.hash_fld = ICE_HASH_UDP_IPV4; + pf->gtpu_eh.ipv4_udp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH; + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4, ICE_FLOW_SEG_HDR_PPPOE, 0); if (ret) @@ -2607,6 +2623,10 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_UDP rss flow fail %d", __func__, ret); + /* Store hash field and header for gtpu_eh ipv6_udp */ + pf->gtpu_eh.ipv6_udp.hash_fld = ICE_HASH_UDP_IPV6; + 
pf->gtpu_eh.ipv6_udp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH; + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6, ICE_FLOW_SEG_HDR_PPPOE, 0); if (ret) @@ -2627,6 +2647,10 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_TCP rss flow fail %d", __func__, ret); + /* Store hash field and header for gtpu_eh ipv4_tcp */ + pf->gtpu_eh.ipv4_tcp.hash_fld = ICE_HASH_TCP_IPV4; + pf->gtpu_eh.ipv4_tcp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH; + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4, ICE_FLOW_SEG_HDR_PPPOE, 0); if (ret) @@ -2647,6 +2671,10 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_TCP rss flow fail %d", __func__, ret); + /* Store hash field and header for gtpu_eh ipv6_tcp */ + pf->gtpu_eh.ipv6_tcp.hash_fld = ICE_HASH_TCP_IPV6; + pf->gtpu_eh.ipv6_tcp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH; + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6, ICE_FLOW_SEG_HDR_PPPOE, 0); if (ret) @@ -2695,6 +2723,28 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) } } +static void +ice_rss_ctx_init(struct ice_pf *pf) +{ + pf->gtpu_eh.ipv4.hash_fld = 0; + pf->gtpu_eh.ipv4.pkt_hdr = 0; + + pf->gtpu_eh.ipv6.hash_fld = 0; + pf->gtpu_eh.ipv6.pkt_hdr = 0; + + pf->gtpu_eh.ipv4_udp.hash_fld = 0; + pf->gtpu_eh.ipv4_udp.pkt_hdr = 0; + + pf->gtpu_eh.ipv6_udp.hash_fld = 0; + pf->gtpu_eh.ipv6_udp.pkt_hdr = 0; + + pf->gtpu_eh.ipv4_tcp.hash_fld = 0; + pf->gtpu_eh.ipv4_tcp.pkt_hdr = 0; + + pf->gtpu_eh.ipv6_tcp.hash_fld = 0; + pf->gtpu_eh.ipv6_tcp.pkt_hdr = 0; +} + static int ice_init_rss(struct ice_pf *pf) { struct ice_hw *hw = ICE_PF_TO_HW(pf); @@ -2755,6 +2805,9 @@ static int ice_init_rss(struct ice_pf *pf) (1 << VSIQF_HASH_CTL_HASH_SCHEME_S); ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg); + /* Initialize RSS context for gtpu_eh */ + ice_rss_ctx_init(pf); + /* RSS hash configuration */ ice_rss_hash_set(pf, rss_conf->rss_hf); diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h 
index 87984ef..e20503f 100644 --- a/drivers/net/ice/ice_ethdev.h +++ b/drivers/net/ice/ice_ethdev.h @@ -358,6 +358,20 @@ struct ice_fdir_info { struct ice_fdir_counter_pool_container counter; }; +struct ice_hash_gtpu_eh_ctx { + uint32_t pkt_hdr; + uint64_t hash_fld; +}; + +struct ice_hash_gtpu_eh { + struct ice_hash_gtpu_eh_ctx ipv4; + struct ice_hash_gtpu_eh_ctx ipv6; + struct ice_hash_gtpu_eh_ctx ipv4_udp; + struct ice_hash_gtpu_eh_ctx ipv6_udp; + struct ice_hash_gtpu_eh_ctx ipv4_tcp; + struct ice_hash_gtpu_eh_ctx ipv6_tcp; +}; + struct ice_pf { struct ice_adapter *adapter; /* The adapter this PF associate to */ struct ice_vsi *main_vsi; /* pointer to main VSI structure */ @@ -381,6 +395,7 @@ struct ice_pf { uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */ uint16_t fdir_qp_offset; struct ice_fdir_info fdir; /* flow director info */ + struct ice_hash_gtpu_eh gtpu_eh; uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; struct ice_hw_port_stats stats_offset; diff --git a/drivers/net/ice/ice_hash.c b/drivers/net/ice/ice_hash.c index e535e4b..83f1a3c 100644 --- a/drivers/net/ice/ice_hash.c +++ b/drivers/net/ice/ice_hash.c @@ -1232,6 +1232,179 @@ struct ice_hash_match_type ice_hash_type_list[] = { } static int +ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr, uint64_t fld) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_vsi *vsi = pf->main_vsi; + int ret; + + uint32_t ipv4_hdr = pf->gtpu_eh.ipv4.pkt_hdr; + uint64_t ipv4_fld = pf->gtpu_eh.ipv4.hash_fld; + + uint32_t ipv6_hdr = pf->gtpu_eh.ipv6.pkt_hdr; + uint64_t ipv6_fld = pf->gtpu_eh.ipv6.hash_fld; + + uint32_t ipv4_udp_hdr = pf->gtpu_eh.ipv4_udp.pkt_hdr; + uint64_t ipv4_udp_fld = pf->gtpu_eh.ipv4_udp.hash_fld; + + uint32_t ipv6_udp_hdr = pf->gtpu_eh.ipv6_udp.pkt_hdr; + uint32_t ipv6_udp_fld = pf->gtpu_eh.ipv6_udp.hash_fld; + + uint32_t ipv4_tcp_hdr = pf->gtpu_eh.ipv4_tcp.pkt_hdr; + uint64_t ipv4_tcp_fld = 
pf->gtpu_eh.ipv4_tcp.hash_fld; + + uint32_t ipv6_tcp_hdr = pf->gtpu_eh.ipv6_tcp.pkt_hdr; + uint64_t ipv6_tcp_fld = pf->gtpu_eh.ipv6_tcp.hash_fld; + + /** + * If header field contains GTPU_EH, store gtpu_eh context. + * If header field contains GTPU_DWN/UP, remove existed gtpu_eh. + */ + if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) { + if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + ipv4_udp_hdr = hdr; + ipv4_udp_fld = fld; + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + ipv6_udp_hdr = hdr; + ipv6_udp_fld = fld; + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + ipv4_tcp_hdr = hdr; + ipv4_tcp_fld = fld; + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + ipv6_tcp_hdr = hdr; + ipv6_tcp_fld = fld; + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & (ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_TCP)) == 0) { + ipv4_hdr = hdr; + ipv4_fld = fld; + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & (ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_TCP)) == 0) { + ipv6_hdr = hdr; + ipv6_fld = fld; + } + } else if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN | + ICE_FLOW_SEG_HDR_GTPU_UP)) { + if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + if (ipv4_udp_fld && ipv4_udp_hdr) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + ipv4_udp_fld, ipv4_udp_hdr); + if (ret) + return -rte_errno; + + ipv4_udp_fld = 0; + ipv4_udp_hdr = 0; + } + + if (ipv4_fld && ipv4_hdr) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + ipv4_fld, ipv4_hdr); + if (ret) + return -rte_errno; + + ipv4_fld = 0; + ipv4_hdr = 0; + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + if (ipv6_udp_fld && ipv6_udp_hdr) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + ipv6_udp_fld, ipv6_udp_hdr); + if (ret) + return -rte_errno; + + ipv6_udp_fld = 0; + ipv6_udp_hdr = 0; + } + + if (ipv6_fld && ipv6_hdr) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + ipv6_fld, ipv6_hdr); + if (ret) + return 
-rte_errno; + + ipv6_fld = 0; + ipv6_hdr = 0; + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + if (ipv4_tcp_fld && ipv4_tcp_hdr) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + ipv4_tcp_fld, ipv4_tcp_hdr); + if (ret) + return -rte_errno; + + ipv4_tcp_fld = 0; + ipv4_tcp_hdr = 0; + } + + if (ipv4_fld & ipv4_hdr) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + ipv4_fld, ipv4_hdr); + if (ret) + return -rte_errno; + + ipv4_fld = 0; + ipv4_hdr = 0; + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + if (ipv6_tcp_fld && ipv6_tcp_hdr) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + ipv6_tcp_fld, ipv6_tcp_hdr); + if (ret) + return -rte_errno; + + ipv6_tcp_fld = 0; + ipv6_tcp_hdr = 0; + } + + if (ipv6_fld && ipv6_hdr) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + ipv6_fld, ipv6_hdr); + if (ret) + return -rte_errno; + + ipv6_fld = 0; + ipv6_hdr = 0; + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & (ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_TCP)) == 0) { + if (ipv4_fld && ipv4_hdr) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + ipv4_fld, ipv4_hdr); + if (ret) + return -rte_errno; + + ipv4_fld = 0; + ipv4_hdr = 0; + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & (ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_TCP)) == 0) { + if (ipv6_fld && ipv6_hdr) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + ipv6_fld, ipv6_hdr); + if (ret) + return -rte_errno; + + ipv6_fld = 0; + ipv6_hdr = 0; + } + } + } + + return 0; +} + +static int ice_hash_create(struct ice_adapter *ad, struct rte_flow *flow, void *meta, @@ -1248,6 +1421,10 @@ struct ice_hash_match_type ice_hash_type_list[] = { uint64_t hash_field = ((struct rss_meta *)meta)->hash_flds; uint8_t hash_function = ((struct rss_meta *)meta)->hash_function; + ret = ice_add_rss_cfg_pre(pf, headermask, hash_field); + if (ret) + return -rte_errno; + filter_ptr = rte_zmalloc("ice_rss_filter", sizeof(struct ice_hash_flow_cfg), 0); if (!filter_ptr) { @@ -1297,6 +1474,28 @@ struct 
ice_hash_match_type ice_hash_type_list[] = { return -rte_errno; } +static void +ice_rem_rss_cfg_post(struct ice_pf *pf) +{ + pf->gtpu_eh.ipv4.hash_fld = 0; + pf->gtpu_eh.ipv4.pkt_hdr = 0; + + pf->gtpu_eh.ipv6.hash_fld = 0; + pf->gtpu_eh.ipv6.pkt_hdr = 0; + + pf->gtpu_eh.ipv4_udp.hash_fld = 0; + pf->gtpu_eh.ipv4_udp.pkt_hdr = 0; + + pf->gtpu_eh.ipv6_udp.hash_fld = 0; + pf->gtpu_eh.ipv6_udp.pkt_hdr = 0; + + pf->gtpu_eh.ipv4_tcp.hash_fld = 0; + pf->gtpu_eh.ipv4_tcp.pkt_hdr = 0; + + pf->gtpu_eh.ipv6_tcp.hash_fld = 0; + pf->gtpu_eh.ipv6_tcp.pkt_hdr = 0; +} + static int ice_hash_destroy(struct ice_adapter *ad, struct rte_flow *flow, @@ -1334,6 +1533,8 @@ struct ice_hash_match_type ice_hash_type_list[] = { } } + ice_rem_rss_cfg_post(pf); + rte_free(filter_ptr); return 0; -- 1.8.3.1 ^ permalink raw reply [flat|nested] 8+ messages in thread
* [dpdk-dev] [PATCH v3] net/ice: fix GTPU down/uplink and extension conflict 2020-07-24 14:41 ` [dpdk-dev] [PATCH v2] " Simei Su @ 2020-07-26 3:13 ` Simei Su 2020-07-27 9:38 ` [dpdk-dev] [PATCH v4] " Simei Su 0 siblings, 1 reply; 8+ messages in thread From: Simei Su @ 2020-07-26 3:13 UTC (permalink / raw) To: qi.z.zhang, beilei.xing; +Cc: dev, jia.guo, Simei Su When adding an RSS rule with GTPU_DWN/UP, it will write the profile from top to bottom due to a firmware limitation. If an RSS rule with GTPU_EH already exists, then a GTPU_DWN/UP packet will match the GTPU_EH profile. This patch solves this issue by remembering a gtpu_eh RSS configuration and removing it before the corresponding RSS configuration for the downlink/uplink rule is issued. Fixes: 2e2810fc1868 ("net/ice: fix GTPU RSS") Signed-off-by: Simei Su <simei.su@intel.com> --- v3: * Rename global structure. * Use some macros to avoid code duplication. * Revise incorrect code that used a local variable. v2: * Refine commit log. * Fix issue where gtpu downlink and uplink rules can't be issued simultaneously. * Fix issue where gtpu down/uplink ipv4_udp/tcp or ipv6_udp/tcp symmetric rules don't take effect. 
--- drivers/net/ice/ice_ethdev.c | 44 +++++++++++ drivers/net/ice/ice_ethdev.h | 23 ++++++ drivers/net/ice/ice_hash.c | 174 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 241 insertions(+) diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c index a4a0390..67f6c65 100644 --- a/drivers/net/ice/ice_ethdev.c +++ b/drivers/net/ice/ice_ethdev.c @@ -2539,6 +2539,12 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4 rss flow fail %d", __func__, ret); + /* Store hash field and header for gtpu_eh ipv4 */ + pf->gtpu_ctx.ipv4.hash_fld = ICE_FLOW_HASH_IPV4; + pf->gtpu_ctx.ipv4.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER; + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 | @@ -2565,6 +2571,12 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6 rss flow fail %d", __func__, ret); + /* Store hash field and header for gtpu_eh ipv6 */ + pf->gtpu_ctx.ipv6.hash_fld = ICE_FLOW_HASH_IPV6; + pf->gtpu_ctx.ipv6.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER; + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 | @@ -2587,6 +2599,10 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_UDP rss flow fail %d", __func__, ret); + /* Store hash field and header for gtpu_eh ipv4_udp */ + pf->gtpu_ctx.ipv4_udp.hash_fld = ICE_HASH_UDP_IPV4; + pf->gtpu_ctx.ipv4_udp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH; + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4, ICE_FLOW_SEG_HDR_PPPOE, 0); if (ret) @@ -2607,6 +2623,10 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_UDP rss flow fail %d", __func__, ret); + /* Store hash field and header for gtpu_eh ipv6_udp */ + pf->gtpu_ctx.ipv6_udp.hash_fld = ICE_HASH_UDP_IPV6; + 
pf->gtpu_ctx.ipv6_udp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH; + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6, ICE_FLOW_SEG_HDR_PPPOE, 0); if (ret) @@ -2627,6 +2647,10 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_TCP rss flow fail %d", __func__, ret); + /* Store hash field and header for gtpu_eh ipv4_tcp */ + pf->gtpu_ctx.ipv4_tcp.hash_fld = ICE_HASH_TCP_IPV4; + pf->gtpu_ctx.ipv4_tcp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH; + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4, ICE_FLOW_SEG_HDR_PPPOE, 0); if (ret) @@ -2647,6 +2671,10 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_TCP rss flow fail %d", __func__, ret); + /* Store hash field and header for gtpu_eh ipv6_tcp */ + pf->gtpu_ctx.ipv6_tcp.hash_fld = ICE_HASH_TCP_IPV6; + pf->gtpu_ctx.ipv6_tcp.pkt_hdr = ICE_FLOW_SEG_HDR_GTPU_EH; + ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6, ICE_FLOW_SEG_HDR_PPPOE, 0); if (ret) @@ -2695,6 +2723,19 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) } } +static void +ice_rss_ctx_init(struct ice_pf *pf) +{ + ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv4); + ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv6); + + ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv4_udp); + ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv6_udp); + + ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv4_tcp); + ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv6_tcp); +} + static int ice_init_rss(struct ice_pf *pf) { struct ice_hw *hw = ICE_PF_TO_HW(pf); @@ -2755,6 +2796,9 @@ static int ice_init_rss(struct ice_pf *pf) (1 << VSIQF_HASH_CTL_HASH_SCHEME_S); ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg); + /* Initialize RSS context for gtpu_eh */ + ice_rss_ctx_init(pf); + /* RSS hash configuration */ ice_rss_hash_set(pf, rss_conf->rss_hf); diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h index 87984ef..1725702 100644 --- a/drivers/net/ice/ice_ethdev.h +++ b/drivers/net/ice/ice_ethdev.h @@ -358,6 +358,28 @@ struct ice_fdir_info { struct 
ice_fdir_counter_pool_container counter; }; +#define ICE_HASH_CFG_VALID(p) \ + ((p)->hash_fld != 0 && (p)->pkt_hdr != 0) + +#define ICE_HASH_CFG_RESET(p) do { \ + (p)->hash_fld = 0; \ + (p)->pkt_hdr = 0; \ +} while (0) + +struct ice_hash_cfg { + uint32_t pkt_hdr; + uint64_t hash_fld; +}; + +struct ice_hash_gtpu_ctx { + struct ice_hash_cfg ipv4; + struct ice_hash_cfg ipv6; + struct ice_hash_cfg ipv4_udp; + struct ice_hash_cfg ipv6_udp; + struct ice_hash_cfg ipv4_tcp; + struct ice_hash_cfg ipv6_tcp; +}; + struct ice_pf { struct ice_adapter *adapter; /* The adapter this PF associate to */ struct ice_vsi *main_vsi; /* pointer to main VSI structure */ @@ -381,6 +403,7 @@ struct ice_pf { uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */ uint16_t fdir_qp_offset; struct ice_fdir_info fdir; /* flow director info */ + struct ice_hash_gtpu_ctx gtpu_ctx; uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; struct ice_hw_port_stats stats_offset; diff --git a/drivers/net/ice/ice_hash.c b/drivers/net/ice/ice_hash.c index e535e4b..3258c1c 100644 --- a/drivers/net/ice/ice_hash.c +++ b/drivers/net/ice/ice_hash.c @@ -1232,6 +1232,161 @@ struct ice_hash_match_type ice_hash_type_list[] = { } static int +ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr, uint64_t fld) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_vsi *vsi = pf->main_vsi; + int ret; + + /** + * If header field contains GTPU_EH, store gtpu_eh context. + * If header field contains GTPU_DWN/UP, remove existed gtpu_eh. 
+ */ + if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) { + if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + pf->gtpu_ctx.ipv4_udp.pkt_hdr = hdr; + pf->gtpu_ctx.ipv4_udp.hash_fld = fld; + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + pf->gtpu_ctx.ipv6_udp.pkt_hdr = hdr; + pf->gtpu_ctx.ipv6_udp.hash_fld = fld; + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + pf->gtpu_ctx.ipv4_tcp.pkt_hdr = hdr; + pf->gtpu_ctx.ipv4_tcp.hash_fld = fld; + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + pf->gtpu_ctx.ipv6_tcp.pkt_hdr = hdr; + pf->gtpu_ctx.ipv6_tcp.hash_fld = fld; + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & (ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_TCP)) == 0) { + pf->gtpu_ctx.ipv4.pkt_hdr = hdr; + pf->gtpu_ctx.ipv4.hash_fld = fld; + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & (ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_TCP)) == 0) { + pf->gtpu_ctx.ipv6.pkt_hdr = hdr; + pf->gtpu_ctx.ipv6.hash_fld = fld; + } + } else if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN | + ICE_FLOW_SEG_HDR_GTPU_UP)) { + if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + if (ICE_HASH_CFG_VALID(&pf->gtpu_ctx.ipv4_udp)) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_ctx.ipv4_udp.hash_fld, + pf->gtpu_ctx.ipv4_udp.pkt_hdr); + if (ret) + return -rte_errno; + + ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv4_udp); + } + + if (ICE_HASH_CFG_VALID(&pf->gtpu_ctx.ipv4)) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_ctx.ipv4.hash_fld, + pf->gtpu_ctx.ipv4.pkt_hdr); + if (ret) + return -rte_errno; + + ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv4); + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + if (ICE_HASH_CFG_VALID(&pf->gtpu_ctx.ipv6_udp)) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_ctx.ipv6_udp.hash_fld, + pf->gtpu_ctx.ipv6_udp.pkt_hdr); + if (ret) + return -rte_errno; + + ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv6_udp); + } + + if 
(ICE_HASH_CFG_VALID(&pf->gtpu_ctx.ipv6)) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_ctx.ipv6.hash_fld, + pf->gtpu_ctx.ipv6.pkt_hdr); + if (ret) + return -rte_errno; + + ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv6); + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + if (ICE_HASH_CFG_VALID(&pf->gtpu_ctx.ipv4_tcp)) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_ctx.ipv4_tcp.hash_fld, + pf->gtpu_ctx.ipv4_tcp.pkt_hdr); + if (ret) + return -rte_errno; + + ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv4_tcp); + } + + if (ICE_HASH_CFG_VALID(&pf->gtpu_ctx.ipv4)) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_ctx.ipv4.hash_fld, + pf->gtpu_ctx.ipv4.pkt_hdr); + if (ret) + return -rte_errno; + + ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv4); + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + if (ICE_HASH_CFG_VALID(&pf->gtpu_ctx.ipv6_tcp)) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_ctx.ipv6_tcp.hash_fld, + pf->gtpu_ctx.ipv6_tcp.pkt_hdr); + if (ret) + return -rte_errno; + + ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv6_tcp); + } + + if (ICE_HASH_CFG_VALID(&pf->gtpu_ctx.ipv6)) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_ctx.ipv6.hash_fld, + pf->gtpu_ctx.ipv6.pkt_hdr); + if (ret) + return -rte_errno; + + ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv6); + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & (ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_TCP)) == 0) { + if (ICE_HASH_CFG_VALID(&pf->gtpu_ctx.ipv4)) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_ctx.ipv4.hash_fld, + pf->gtpu_ctx.ipv4.pkt_hdr); + if (ret) + return -rte_errno; + + ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv4); + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & (ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_TCP)) == 0) { + if (ICE_HASH_CFG_VALID(&pf->gtpu_ctx.ipv6)) { + ret = ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_ctx.ipv6.hash_fld, + pf->gtpu_ctx.ipv6.pkt_hdr); + if (ret) + return -rte_errno; + + ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv6); + } + 
} + } + + return 0; +} + +static int ice_hash_create(struct ice_adapter *ad, struct rte_flow *flow, void *meta, @@ -1248,6 +1403,10 @@ struct ice_hash_match_type ice_hash_type_list[] = { uint64_t hash_field = ((struct rss_meta *)meta)->hash_flds; uint8_t hash_function = ((struct rss_meta *)meta)->hash_function; + ret = ice_add_rss_cfg_pre(pf, headermask, hash_field); + if (ret) + return -rte_errno; + filter_ptr = rte_zmalloc("ice_rss_filter", sizeof(struct ice_hash_flow_cfg), 0); if (!filter_ptr) { @@ -1297,6 +1456,19 @@ struct ice_hash_match_type ice_hash_type_list[] = { return -rte_errno; } +static void +ice_rem_rss_cfg_post(struct ice_pf *pf) +{ + ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv4); + ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv6); + + ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv4_udp); + ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv6_udp); + + ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv4_tcp); + ICE_HASH_CFG_RESET(&pf->gtpu_ctx.ipv6_tcp); +} + static int ice_hash_destroy(struct ice_adapter *ad, struct rte_flow *flow, @@ -1334,6 +1506,8 @@ struct ice_hash_match_type ice_hash_type_list[] = { } } + ice_rem_rss_cfg_post(pf); + rte_free(filter_ptr); return 0; -- 1.8.3.1 ^ permalink raw reply [flat|nested] 8+ messages in thread
* [dpdk-dev] [PATCH v4] net/ice: fix GTPU down/uplink and extension conflict 2020-07-26 3:13 ` [dpdk-dev] [PATCH v3] " Simei Su @ 2020-07-27 9:38 ` Simei Su 2020-07-28 8:44 ` [dpdk-dev] [PATCH v5] " Simei Su 0 siblings, 1 reply; 8+ messages in thread From: Simei Su @ 2020-07-27 9:38 UTC (permalink / raw) To: qi.z.zhang, beilei.xing; +Cc: dev, jia.guo, Simei Su When adding an RSS rule with GTPU_DWN/UP, it will write the profile from top to bottom due to a firmware limitation. If an RSS rule with GTPU_EH already exists, then a GTPU_DWN/UP packet will match the GTPU_EH profile. This patch solves this issue by remembering a gtpu_eh RSS configuration and removing it before the corresponding RSS configuration for the downlink/uplink rule is issued. Fixes: 2e2810fc1868 ("net/ice: fix GTPU RSS") Signed-off-by: Simei Su <simei.su@intel.com> --- v4: * Create ice_add_rss_cfg_wrap to replace the existing ice_add_rss_cfg. * Create ice_rem_rss_cfg_wrap to replace the existing ice_rem_rss_cfg. v3: * Rename global structure. * Use some macros to avoid code duplication. * Revise incorrect code that used a local variable. v2: * Refine commit log. * Fix issue where gtpu downlink and uplink rules can't be issued simultaneously. * Fix issue where gtpu down/uplink ipv4_udp/tcp or ipv6_udp/tcp symmetric rules don't take effect. 
--- drivers/net/ice/ice_ethdev.c | 271 ++++++++++++++++++++++++++++++++++++++----- drivers/net/ice/ice_ethdev.h | 27 +++++ drivers/net/ice/ice_hash.c | 4 +- 3 files changed, 269 insertions(+), 33 deletions(-) diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c index c4c0e63..70fbf2b 100644 --- a/drivers/net/ice/ice_ethdev.c +++ b/drivers/net/ice/ice_ethdev.c @@ -2429,16 +2429,209 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) return 0; } +static int +ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr) +{ + if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) { + if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp); + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp); + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp); + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp); + } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) { + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4); + } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) { + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6); + } + } + + return 0; +} + +static int +ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr, uint64_t fld) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_vsi *vsi = pf->main_vsi; + + /** + * If header field contains GTPU_EH, store gtpu_eh context. + * If header field contains GTPU_DWN/UP, remove existed gtpu_eh. 
+ */ + if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) { + if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr = hdr; + pf->gtpu_hash_ctx.ipv4_udp.hash_fld = fld; + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr = hdr; + pf->gtpu_hash_ctx.ipv6_udp.hash_fld = fld; + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr = hdr; + pf->gtpu_hash_ctx.ipv4_tcp.hash_fld = fld; + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr = hdr; + pf->gtpu_hash_ctx.ipv6_tcp.hash_fld = fld; + } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) { + pf->gtpu_hash_ctx.ipv4.pkt_hdr = hdr; + pf->gtpu_hash_ctx.ipv4.hash_fld = fld; + + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp); + } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) { + pf->gtpu_hash_ctx.ipv6.pkt_hdr = hdr; + pf->gtpu_hash_ctx.ipv6.hash_fld = fld; + + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp); + } + } else if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN | + ICE_FLOW_SEG_HDR_GTPU_UP)) { + if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv4_udp.hash_fld, + pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr); + + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp); + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv6_udp.hash_fld, + pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr); + + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp); + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) { + 
ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv4_tcp.hash_fld, + pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr); + + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp); + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv6_tcp.hash_fld, + pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr); + + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp); + } + } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) { + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv4.hash_fld, + pf->gtpu_hash_ctx.ipv4.pkt_hdr); + + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4); + } + + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv4_udp.hash_fld, + pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr); + + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp); + } + + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv4_tcp.hash_fld, + pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr); + + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp); + } + } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) { + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv6.hash_fld, + pf->gtpu_hash_ctx.ipv6.pkt_hdr); + + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6); + } + + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv6_udp.hash_fld, + pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr); + + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp); + } + + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv6_tcp.hash_fld, + pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr); + + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp); + } + } + } + + return 0; +} + +int +ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, + uint64_t fld, uint32_t hdr) +{ 
+ struct ice_hw *hw = ICE_PF_TO_HW(pf); + int ret; + + ret = ice_rem_rss_cfg(hw, vsi_id, fld, hdr); + if (ret && ret != ICE_ERR_DOES_NOT_EXIST) + PMD_DRV_LOG(ERR, "remove rss cfg failed\n"); + + ret = ice_rem_rss_cfg_post(pf, hdr); + if (ret) + PMD_DRV_LOG(ERR, "remove rss cfg post failed\n"); + + return 0; +} + +int +ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, + uint64_t fld, uint32_t hdr, bool symm) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + int ret; + + ret = ice_add_rss_cfg_pre(pf, hdr, fld); + if (ret) + PMD_DRV_LOG(ERR, "add rss cfg pre failed\n"); + + ret = ice_add_rss_cfg(hw, vsi_id, fld, hdr, symm); + if (ret) + PMD_DRV_LOG(ERR, "add rss cfg failed\n"); + + return 0; +} + static void ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf) { - struct ice_hw *hw = ICE_PF_TO_HW(pf); struct ice_vsi *vsi = pf->main_vsi; int ret; /* Configure RSS for IPv4 with src/dst addr as input set */ if (rss_hf & ETH_RSS_IPV4) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) @@ -2448,7 +2641,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) /* Configure RSS for IPv6 with src/dst addr as input set */ if (rss_hf & ETH_RSS_IPV6) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) @@ -2458,7 +2651,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) /* Configure RSS for udp4 with src/dst addr and port as input set */ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4, ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2469,7 +2662,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) /* Configure RSS for udp6 with src/dst addr and port as 
input set */ if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6, ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2480,7 +2673,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) /* Configure RSS for tcp4 with src/dst addr and port as input set */ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4, ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2491,7 +2684,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) /* Configure RSS for tcp6 with src/dst addr and port as input set */ if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6, ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2502,7 +2695,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) /* Configure RSS for sctp4 with src/dst addr and port as input set */ if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2513,7 +2706,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) /* Configure RSS for sctp6 with src/dst addr and port as input set */ if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2523,7 +2716,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) } if (rss_hf & ETH_RSS_IPV4) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4, + ret = ice_add_rss_cfg_wrap(pf, 
vsi->idx, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2531,7 +2724,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "%s GTPU_IPV4 rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2539,7 +2732,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4 rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2549,7 +2742,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) } if (rss_hf & ETH_RSS_IPV6) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2557,7 +2750,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "%s GTPU_IPV6 rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2565,7 +2758,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6 rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2575,19 +2768,19 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) } if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4, + ret = 
ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4, ICE_FLOW_SEG_HDR_GTPU_IP, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_IPV4_UDP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4, ICE_FLOW_SEG_HDR_GTPU_EH, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_UDP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4, ICE_FLOW_SEG_HDR_PPPOE, 0); if (ret) PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_UDP rss flow fail %d", @@ -2595,19 +2788,19 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) } if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6, ICE_FLOW_SEG_HDR_GTPU_IP, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_IPV6_UDP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6, ICE_FLOW_SEG_HDR_GTPU_EH, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_UDP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6, ICE_FLOW_SEG_HDR_PPPOE, 0); if (ret) PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_UDP rss flow fail %d", @@ -2615,19 +2808,19 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) } if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4, ICE_FLOW_SEG_HDR_GTPU_IP, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_IPV4_TCP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4, ICE_FLOW_SEG_HDR_GTPU_EH, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_TCP rss flow fail %d", __func__, ret); - ret = 
ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4, ICE_FLOW_SEG_HDR_PPPOE, 0); if (ret) PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_TCP rss flow fail %d", @@ -2635,19 +2828,19 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) } if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6, ICE_FLOW_SEG_HDR_GTPU_IP, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_IPV6_TCP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6, ICE_FLOW_SEG_HDR_GTPU_EH, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_TCP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6, ICE_FLOW_SEG_HDR_PPPOE, 0); if (ret) PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_TCP rss flow fail %d", @@ -2655,13 +2848,13 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) } if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_SCTP_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV4, ICE_FLOW_SEG_HDR_GTPU_IP, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_IPV4_SCTP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_SCTP_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV4, ICE_FLOW_SEG_HDR_GTPU_EH, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_SCTP rss flow fail %d", @@ -2669,13 +2862,13 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) } if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_SCTP_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV6, ICE_FLOW_SEG_HDR_GTPU_IP, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_IPV6_SCTP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_SCTP_IPV6, + ret = 
ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV6, ICE_FLOW_SEG_HDR_GTPU_EH, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_SCTP rss flow fail %d", @@ -2683,6 +2876,19 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) } } +static void +ice_rss_ctx_init(struct ice_pf *pf) +{ + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6); + + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp); + + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp); +} + static int ice_init_rss(struct ice_pf *pf) { struct ice_hw *hw = ICE_PF_TO_HW(pf); @@ -2743,6 +2949,9 @@ static int ice_init_rss(struct ice_pf *pf) (1 << VSIQF_HASH_CTL_HASH_SCHEME_S); ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg); + /* Initialize RSS context for gtpu_eh */ + ice_rss_ctx_init(pf); + /* RSS hash configuration */ ice_rss_hash_set(pf, rss_conf->rss_hf); diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h index 87984ef..f740532 100644 --- a/drivers/net/ice/ice_ethdev.h +++ b/drivers/net/ice/ice_ethdev.h @@ -358,6 +358,28 @@ struct ice_fdir_info { struct ice_fdir_counter_pool_container counter; }; +#define ICE_HASH_CFG_VALID(p) \ + ((p)->hash_fld != 0 && (p)->pkt_hdr != 0) + +#define ICE_HASH_CFG_RESET(p) do { \ + (p)->hash_fld = 0; \ + (p)->pkt_hdr = 0; \ +} while (0) + +struct ice_hash_cfg { + uint32_t pkt_hdr; + uint64_t hash_fld; +}; + +struct ice_hash_gtpu_ctx { + struct ice_hash_cfg ipv4; + struct ice_hash_cfg ipv6; + struct ice_hash_cfg ipv4_udp; + struct ice_hash_cfg ipv4_tcp; + struct ice_hash_cfg ipv6_udp; + struct ice_hash_cfg ipv6_tcp; +}; + struct ice_pf { struct ice_adapter *adapter; /* The adapter this PF associate to */ struct ice_vsi *main_vsi; /* pointer to main VSI structure */ @@ -381,6 +403,7 @@ struct ice_pf { uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */ uint16_t fdir_qp_offset; struct 
ice_fdir_info fdir; /* flow director info */ + struct ice_hash_gtpu_ctx gtpu_hash_ctx; uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; struct ice_hw_port_stats stats_offset; @@ -482,6 +505,10 @@ struct ice_vsi * void ice_vsi_enable_queues_intr(struct ice_vsi *vsi); void ice_vsi_disable_queues_intr(struct ice_vsi *vsi); void ice_vsi_queues_bind_intr(struct ice_vsi *vsi); +int ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, + uint64_t hash_fld, uint32_t pkt_hdr, bool symm); +int ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, + uint64_t hash_fld, uint32_t pkt_hdr); static inline int ice_align_floor(int n) diff --git a/drivers/net/ice/ice_hash.c b/drivers/net/ice/ice_hash.c index 2845ca0..143ced1 100644 --- a/drivers/net/ice/ice_hash.c +++ b/drivers/net/ice/ice_hash.c @@ -1262,7 +1262,7 @@ struct ice_hash_match_type ice_hash_type_list[] = { (hash_function == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ); - ret = ice_add_rss_cfg(hw, vsi->idx, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, filter_ptr->rss_cfg.hashed_flds, filter_ptr->rss_cfg.packet_hdr, filter_ptr->rss_cfg.symm); @@ -1306,7 +1306,7 @@ struct ice_hash_match_type ice_hash_type_list[] = { (1 << VSIQF_HASH_CTL_HASH_SCHEME_S); ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg); } else { - ret = ice_rem_rss_cfg(hw, vsi->idx, + ret = ice_rem_rss_cfg_wrap(pf, vsi->idx, filter_ptr->rss_cfg.hashed_flds, filter_ptr->rss_cfg.packet_hdr); /* Fixme: Ignore the error if a rule does not exist. -- 1.8.3.1 ^ permalink raw reply [flat|nested] 8+ messages in thread
* [dpdk-dev] [PATCH v5] net/ice: fix GTPU down/uplink and extension conflict 2020-07-27 9:38 ` [dpdk-dev] [PATCH v4] " Simei Su @ 2020-07-28 8:44 ` Simei Su 2020-07-28 11:07 ` [dpdk-dev] [PATCH v6] " Simei Su 0 siblings, 1 reply; 8+ messages in thread From: Simei Su @ 2020-07-28 8:44 UTC (permalink / raw) To: qi.z.zhang, beilei.xing; +Cc: dev, jia.guo, Simei Su When adding an RSS rule with GTPU_DWN/UP, the profile is written from top to bottom due to a firmware limitation. If an RSS rule with GTPU_EH already exists, then a GTPU_DWN/UP packet will match the GTPU_EH profile. This patch solves this issue by remembering the gtpu_eh RSS configuration and removing it before the corresponding RSS configuration for the downlink/uplink rule is issued. Fixes: 2e2810fc1868 ("net/ice: fix GTPU RSS") Signed-off-by: Simei Su <simei.su@intel.com> --- v5: * Reconstruct code logic. v4: * Create ice_add_rss_cfg_wrap to replace the existing ice_add_rss_cfg. * Create ice_rem_rss_cfg_wrap to replace the existing ice_rem_rss_cfg. v3: * Rename global structure. * Use some macros to avoid code duplication. * Revise incorrect code where a local variable was used. v2: * Refine commit log. * Fix the issue that gtpu downlink and uplink rules can't be issued simultaneously. * Fix the issue that gtpu down/uplink ipv4_udp/tcp or ipv6_udp/tcp symmetric hashing doesn't take effect. 
--- drivers/net/ice/ice_ethdev.c | 437 ++++++++++++++++++++++++++++++++++++++----- drivers/net/ice/ice_ethdev.h | 38 ++++ drivers/net/ice/ice_hash.c | 4 +- 3 files changed, 430 insertions(+), 49 deletions(-) diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c index c4c0e63..718e7f7 100644 --- a/drivers/net/ice/ice_ethdev.c +++ b/drivers/net/ice/ice_ethdev.c @@ -2429,16 +2429,295 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) return 0; } +static int +ice_add_rss_cfg_post(struct ice_pf *pf, uint32_t hdr, uint64_t fld, bool symm) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_vsi *vsi = pf->main_vsi; + + if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) { + if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr = hdr; + pf->gtpu_hash_ctx.ipv4_udp.hash_fld = fld; + pf->gtpu_hash_ctx.ipv4_udp.symm = symm; + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr = hdr; + pf->gtpu_hash_ctx.ipv6_udp.hash_fld = fld; + pf->gtpu_hash_ctx.ipv6_udp.symm = symm; + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr = hdr; + pf->gtpu_hash_ctx.ipv4_tcp.hash_fld = fld; + pf->gtpu_hash_ctx.ipv4_tcp.symm = symm; + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr = hdr; + pf->gtpu_hash_ctx.ipv6_tcp.hash_fld = fld; + pf->gtpu_hash_ctx.ipv6_tcp.symm = symm; + } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) { + pf->gtpu_hash_ctx.ipv4.pkt_hdr = hdr; + pf->gtpu_hash_ctx.ipv4.hash_fld = fld; + pf->gtpu_hash_ctx.ipv4.symm = symm; + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp); + } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) { + pf->gtpu_hash_ctx.ipv6.pkt_hdr = hdr; + pf->gtpu_hash_ctx.ipv6.hash_fld = fld; + pf->gtpu_hash_ctx.ipv6.symm = symm; + 
ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp); + } + } + + if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN | + ICE_FLOW_SEG_HDR_GTPU_UP)) { + if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + if (ICE_HASH_CFG_IS_ROTATING(pf->gtpu_hash_ctx. + ipv4.rotate)) { + ice_add_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv4.hash_fld, + pf->gtpu_hash_ctx.ipv4.pkt_hdr, + pf->gtpu_hash_ctx.ipv4.symm); + ICE_HASH_CFG_ROTATE_STOP(pf->gtpu_hash_ctx. + ipv4.rotate); + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + if (ICE_HASH_CFG_IS_ROTATING(pf->gtpu_hash_ctx. + ipv6.rotate)) { + ice_add_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv6.hash_fld, + pf->gtpu_hash_ctx.ipv6.pkt_hdr, + pf->gtpu_hash_ctx.ipv6.symm); + ICE_HASH_CFG_ROTATE_STOP(pf->gtpu_hash_ctx. + ipv6.rotate); + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + if (ICE_HASH_CFG_IS_ROTATING(pf->gtpu_hash_ctx. + ipv4.rotate)) { + ice_add_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv4.hash_fld, + pf->gtpu_hash_ctx.ipv4.pkt_hdr, + pf->gtpu_hash_ctx.ipv4.symm); + ICE_HASH_CFG_ROTATE_STOP(pf->gtpu_hash_ctx. + ipv4.rotate); + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + if (ICE_HASH_CFG_IS_ROTATING(pf->gtpu_hash_ctx. + ipv6.rotate)) { + ice_add_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv6.hash_fld, + pf->gtpu_hash_ctx.ipv6.pkt_hdr, + pf->gtpu_hash_ctx.ipv6.symm); + ICE_HASH_CFG_ROTATE_STOP(pf->gtpu_hash_ctx. 
+ ipv6.rotate); + } + } + } + + return 0; +} + +static int +ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_vsi *vsi = pf->main_vsi; + + if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN | + ICE_FLOW_SEG_HDR_GTPU_UP)) { + if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv4_udp.hash_fld, + pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp); + } + + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv4.hash_fld, + pf->gtpu_hash_ctx.ipv4.pkt_hdr); + ICE_HASH_CFG_ROTATE_START(pf->gtpu_hash_ctx. + ipv4.rotate); + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv6_udp.hash_fld, + pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp); + } + + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv6.hash_fld, + pf->gtpu_hash_ctx.ipv6.pkt_hdr); + ICE_HASH_CFG_ROTATE_START(pf->gtpu_hash_ctx. + ipv6.rotate); + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv4_tcp.hash_fld, + pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp); + } + + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv4.hash_fld, + pf->gtpu_hash_ctx.ipv4.pkt_hdr); + ICE_HASH_CFG_ROTATE_START(pf->gtpu_hash_ctx. 
+ ipv4.rotate); + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv6_tcp.hash_fld, + pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp); + } + + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv6.hash_fld, + pf->gtpu_hash_ctx.ipv6.pkt_hdr); + ICE_HASH_CFG_ROTATE_START(pf->gtpu_hash_ctx. + ipv6.rotate); + } + } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) { + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv4.hash_fld, + pf->gtpu_hash_ctx.ipv4.pkt_hdr); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4); + } + + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv4_udp.hash_fld, + pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp); + } + + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv4_tcp.hash_fld, + pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp); + } + } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) { + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv6.hash_fld, + pf->gtpu_hash_ctx.ipv6.pkt_hdr); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6); + } + + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv6_udp.hash_fld, + pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp); + } + + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv6_tcp.hash_fld, + pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp); + } + } + } + + return 0; +} + +static int 
+ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr) +{ + if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) { + if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp); + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp); + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp); + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp); + } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) { + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4); + } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) { + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6); + } + } + + return 0; +} + +int +ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, + uint64_t fld, uint32_t hdr) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + int ret; + + ret = ice_rem_rss_cfg(hw, vsi_id, fld, hdr); + if (ret && ret != ICE_ERR_DOES_NOT_EXIST) + PMD_DRV_LOG(ERR, "remove rss cfg failed\n"); + + ret = ice_rem_rss_cfg_post(pf, hdr); + if (ret) + PMD_DRV_LOG(ERR, "remove rss cfg post failed\n"); + + return 0; +} + +int +ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, + uint64_t fld, uint32_t hdr, bool symm) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + int ret; + + ret = ice_add_rss_cfg_pre(pf, hdr); + if (ret) + PMD_DRV_LOG(ERR, "add rss cfg pre failed\n"); + + ret = ice_add_rss_cfg(hw, vsi_id, fld, hdr, symm); + if (ret) + PMD_DRV_LOG(ERR, "add rss cfg failed\n"); + + ret = ice_add_rss_cfg_post(pf, hdr, fld, symm); + if (ret) + PMD_DRV_LOG(ERR, "add rss cfg post failed\n"); + + return 0; +} + static void ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf) { - struct ice_hw *hw = ICE_PF_TO_HW(pf); struct ice_vsi *vsi = pf->main_vsi; int ret; /* Configure RSS for IPv4 with src/dst addr as input set */ if (rss_hf & ETH_RSS_IPV4) { - ret = 
ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) @@ -2448,7 +2727,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) /* Configure RSS for IPv6 with src/dst addr as input set */ if (rss_hf & ETH_RSS_IPV6) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) @@ -2458,7 +2737,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) /* Configure RSS for udp4 with src/dst addr and port as input set */ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4, ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2469,7 +2748,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) /* Configure RSS for udp6 with src/dst addr and port as input set */ if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6, ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2480,7 +2759,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) /* Configure RSS for tcp4 with src/dst addr and port as input set */ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4, ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2491,7 +2770,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) /* Configure RSS for tcp6 with src/dst addr and port as input set */ if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6, 
ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2502,7 +2781,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) /* Configure RSS for sctp4 with src/dst addr and port as input set */ if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2513,7 +2792,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) /* Configure RSS for sctp6 with src/dst addr and port as input set */ if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2523,7 +2802,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) } if (rss_hf & ETH_RSS_IPV4) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2531,7 +2810,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "%s GTPU_IPV4 rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2539,7 +2818,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4 rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2549,7 +2828,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) } if (rss_hf & ETH_RSS_IPV6) { - ret = ice_add_rss_cfg(hw, 
vsi->idx, ICE_FLOW_HASH_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2557,7 +2836,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "%s GTPU_IPV6 rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2565,7 +2844,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6 rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2575,114 +2854,175 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) } if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4, - ICE_FLOW_SEG_HDR_GTPU_IP, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4, + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_IPV4_UDP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4, - ICE_FLOW_SEG_HDR_GTPU_EH, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4, + ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_UDP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4, - ICE_FLOW_SEG_HDR_PPPOE, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4, + ICE_FLOW_SEG_HDR_PPPOE | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_UDP rss flow 
fail %d", __func__, ret); } if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6, - ICE_FLOW_SEG_HDR_GTPU_IP, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6, + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_IPV6_UDP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6, - ICE_FLOW_SEG_HDR_GTPU_EH, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6, + ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_UDP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6, - ICE_FLOW_SEG_HDR_PPPOE, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6, + ICE_FLOW_SEG_HDR_PPPOE | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_UDP rss flow fail %d", __func__, ret); } if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4, - ICE_FLOW_SEG_HDR_GTPU_IP, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4, + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_IPV4_TCP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4, - ICE_FLOW_SEG_HDR_GTPU_EH, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4, + ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_TCP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4, - ICE_FLOW_SEG_HDR_PPPOE, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4, + 
ICE_FLOW_SEG_HDR_PPPOE | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_TCP rss flow fail %d", __func__, ret); } if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6, - ICE_FLOW_SEG_HDR_GTPU_IP, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6, + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_IPV6_TCP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6, - ICE_FLOW_SEG_HDR_GTPU_EH, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6, + ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_TCP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6, - ICE_FLOW_SEG_HDR_PPPOE, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6, + ICE_FLOW_SEG_HDR_PPPOE | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_TCP rss flow fail %d", __func__, ret); } if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_SCTP_IPV4, - ICE_FLOW_SEG_HDR_GTPU_IP, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV4, + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_SCTP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_IPV4_SCTP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_SCTP_IPV4, - ICE_FLOW_SEG_HDR_GTPU_EH, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV4, + ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_SCTP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_SCTP rss flow fail %d", 
__func__, ret); } if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_SCTP_IPV6, - ICE_FLOW_SEG_HDR_GTPU_IP, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV6, + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_SCTP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_IPV6_SCTP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_SCTP_IPV6, - ICE_FLOW_SEG_HDR_GTPU_EH, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV6, + ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_SCTP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_SCTP rss flow fail %d", __func__, ret); } } +static void +ice_rss_ctx_init(struct ice_pf *pf) +{ + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6); + + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp); + + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp); +} + static int ice_init_rss(struct ice_pf *pf) { struct ice_hw *hw = ICE_PF_TO_HW(pf); @@ -2743,6 +3083,9 @@ static int ice_init_rss(struct ice_pf *pf) (1 << VSIQF_HASH_CTL_HASH_SCHEME_S); ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg); + /* Initialize RSS context for gtpu_eh */ + ice_rss_ctx_init(pf); + /* RSS hash configuration */ ice_rss_hash_set(pf, rss_conf->rss_hf); diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h index 87984ef..36c4a87 100644 --- a/drivers/net/ice/ice_ethdev.h +++ b/drivers/net/ice/ice_ethdev.h @@ -358,6 +358,39 @@ struct ice_fdir_info { struct ice_fdir_counter_pool_container counter; }; +#define ICE_HASH_CFG_VALID(p) \ + ((p)->hash_fld != 0 && (p)->pkt_hdr != 0) + +#define ICE_HASH_CFG_RESET(p) do { \ + (p)->hash_fld = 0; \ + (p)->pkt_hdr = 0; \ +} while (0) + +#define ICE_HASH_CFG_IS_ROTATING(value) \ + (value == 
true) + +#define ICE_HASH_CFG_ROTATE_START(value) \ + (value = true) + +#define ICE_HASH_CFG_ROTATE_STOP(value) \ + (value = false) + +struct ice_hash_cfg { + uint32_t pkt_hdr; + uint64_t hash_fld; + bool rotate; /* rotate l3 rule after l4 rule. */ + bool symm; +}; + +struct ice_hash_gtpu_ctx { + struct ice_hash_cfg ipv4; + struct ice_hash_cfg ipv6; + struct ice_hash_cfg ipv4_udp; + struct ice_hash_cfg ipv6_udp; + struct ice_hash_cfg ipv4_tcp; + struct ice_hash_cfg ipv6_tcp; +}; + struct ice_pf { struct ice_adapter *adapter; /* The adapter this PF associate to */ struct ice_vsi *main_vsi; /* pointer to main VSI structure */ @@ -381,6 +414,7 @@ struct ice_pf { uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */ uint16_t fdir_qp_offset; struct ice_fdir_info fdir; /* flow director info */ + struct ice_hash_gtpu_ctx gtpu_hash_ctx; uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; struct ice_hw_port_stats stats_offset; @@ -482,6 +516,10 @@ struct ice_vsi * void ice_vsi_enable_queues_intr(struct ice_vsi *vsi); void ice_vsi_disable_queues_intr(struct ice_vsi *vsi); void ice_vsi_queues_bind_intr(struct ice_vsi *vsi); +int ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, + uint64_t hash_fld, uint32_t pkt_hdr, bool symm); +int ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, + uint64_t hash_fld, uint32_t pkt_hdr); static inline int ice_align_floor(int n) diff --git a/drivers/net/ice/ice_hash.c b/drivers/net/ice/ice_hash.c index fdfaff7..1ea0e70 100644 --- a/drivers/net/ice/ice_hash.c +++ b/drivers/net/ice/ice_hash.c @@ -1271,7 +1271,7 @@ struct ice_hash_match_type ice_hash_type_list[] = { (hash_function == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ); - ret = ice_add_rss_cfg(hw, vsi->idx, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, filter_ptr->rss_cfg.hashed_flds, filter_ptr->rss_cfg.packet_hdr, filter_ptr->rss_cfg.symm); @@ -1315,7 +1315,7 @@ struct ice_hash_match_type 
ice_hash_type_list[] = { (1 << VSIQF_HASH_CTL_HASH_SCHEME_S); ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg); } else { - ret = ice_rem_rss_cfg(hw, vsi->idx, + ret = ice_rem_rss_cfg_wrap(pf, vsi->idx, filter_ptr->rss_cfg.hashed_flds, filter_ptr->rss_cfg.packet_hdr); /* Fixme: Ignore the error if a rule does not exist. -- 1.8.3.1 ^ permalink raw reply [flat|nested] 8+ messages in thread
* [dpdk-dev] [PATCH v6] net/ice: fix GTPU down/uplink and extension conflict 2020-07-28 8:44 ` [dpdk-dev] [PATCH v5] " Simei Su @ 2020-07-28 11:07 ` Simei Su 0 siblings, 0 replies; 8+ messages in thread From: Simei Su @ 2020-07-28 11:07 UTC (permalink / raw) To: qi.z.zhang, beilei.xing; +Cc: dev, jia.guo, Simei Su When adding a RSS rule with GTPU_DWN/UP, it will write from top to bottom for profile due to firmware limitation. If a RSS rule with GTPU_EH already exists, then GTPU_DWN/UP packet will match GTPU_EH profile. This patch solves this issue by remembering a gtpu_eh RSS configuration and removing it before the corresponding RSS configuration for the downlink/uplink rule is issued. Fixes: 2e2810fc1868 ("net/ice: fix GTPU RSS") Signed-off-by: Simei Su <simei.su@intel.com> --- v6: * Fix coding style issue for macro definition. * Move ice_rss_ctx_init() to dev_init(). v5: * Reconstruct code logic. v4: * Create ice_add_rss_cfg_wrap to replace the existing ice_add_rss_cfg with it. * Create ice_rem_rss_cfg_wrap to replace the existing ice_rem_rss_cfg with it. v3: * Rename global structure. * Use some macros to avoid code duplication. * Revise incorrect code that uses a local variable. v2: * Refine commit log. * Fix gtpu downlink and uplink can't be issued simultaneously. * Fix gtpu down/uplink ipv4_udp/tcp or ipv6_udp/tcp symmetric don't take effect. 
--- drivers/net/ice/ice_ethdev.c | 425 ++++++++++++++++++++++++++++++++++++++----- drivers/net/ice/ice_ethdev.h | 38 ++++ drivers/net/ice/ice_hash.c | 4 +- 3 files changed, 418 insertions(+), 49 deletions(-) diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c index c4c0e63..eb24879 100644 --- a/drivers/net/ice/ice_ethdev.c +++ b/drivers/net/ice/ice_ethdev.c @@ -2113,6 +2113,19 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) return 0; } +static void +ice_rss_ctx_init(struct ice_pf *pf) +{ + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6); + + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp); + + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp); +} + static int ice_dev_init(struct rte_eth_dev *dev) { @@ -2244,6 +2257,9 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) /* get base queue pairs index in the device */ ice_base_queue_get(pf); + /* Initialize RSS context for gtpu_eh */ + ice_rss_ctx_init(pf); + if (!ad->is_safe_mode) { ret = ice_flow_init(ad); if (ret) { @@ -2429,16 +2445,283 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) return 0; } +static int +ice_add_rss_cfg_post(struct ice_pf *pf, uint32_t hdr, uint64_t fld, bool symm) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_vsi *vsi = pf->main_vsi; + + if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) { + if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr = hdr; + pf->gtpu_hash_ctx.ipv4_udp.hash_fld = fld; + pf->gtpu_hash_ctx.ipv4_udp.symm = symm; + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr = hdr; + pf->gtpu_hash_ctx.ipv6_udp.hash_fld = fld; + pf->gtpu_hash_ctx.ipv6_udp.symm = symm; + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr = 
hdr; + pf->gtpu_hash_ctx.ipv4_tcp.hash_fld = fld; + pf->gtpu_hash_ctx.ipv4_tcp.symm = symm; + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr = hdr; + pf->gtpu_hash_ctx.ipv6_tcp.hash_fld = fld; + pf->gtpu_hash_ctx.ipv6_tcp.symm = symm; + } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) { + pf->gtpu_hash_ctx.ipv4.pkt_hdr = hdr; + pf->gtpu_hash_ctx.ipv4.hash_fld = fld; + pf->gtpu_hash_ctx.ipv4.symm = symm; + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp); + } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) { + pf->gtpu_hash_ctx.ipv6.pkt_hdr = hdr; + pf->gtpu_hash_ctx.ipv6.hash_fld = fld; + pf->gtpu_hash_ctx.ipv6.symm = symm; + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp); + } + } + + if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN | + ICE_FLOW_SEG_HDR_GTPU_UP)) { + if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv4)) { + ice_add_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv4.hash_fld, + pf->gtpu_hash_ctx.ipv4.pkt_hdr, + pf->gtpu_hash_ctx.ipv4.symm); + ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv4); + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv6)) { + ice_add_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv6.hash_fld, + pf->gtpu_hash_ctx.ipv6.pkt_hdr, + pf->gtpu_hash_ctx.ipv6.symm); + ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv6); + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv4)) { + ice_add_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv4.hash_fld, + pf->gtpu_hash_ctx.ipv4.pkt_hdr, + pf->gtpu_hash_ctx.ipv4.symm); + ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv4); + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + if 
(ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv6)) { + ice_add_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv6.hash_fld, + pf->gtpu_hash_ctx.ipv6.pkt_hdr, + pf->gtpu_hash_ctx.ipv6.symm); + ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv6); + } + } + } + + return 0; +} + +static int +ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + struct ice_vsi *vsi = pf->main_vsi; + + if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN | + ICE_FLOW_SEG_HDR_GTPU_UP)) { + if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv4_udp.hash_fld, + pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp); + } + + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv4.hash_fld, + pf->gtpu_hash_ctx.ipv4.pkt_hdr); + ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv4); + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv6_udp.hash_fld, + pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp); + } + + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv6.hash_fld, + pf->gtpu_hash_ctx.ipv6.pkt_hdr); + ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv6); + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv4_tcp.hash_fld, + pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp); + } + + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv4.hash_fld, + pf->gtpu_hash_ctx.ipv4.pkt_hdr); + 
ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv4); + } + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv6_tcp.hash_fld, + pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp); + } + + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv6.hash_fld, + pf->gtpu_hash_ctx.ipv6.pkt_hdr); + ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv6); + } + } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) { + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv4.hash_fld, + pf->gtpu_hash_ctx.ipv4.pkt_hdr); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4); + } + + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv4_udp.hash_fld, + pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp); + } + + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv4_tcp.hash_fld, + pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp); + } + } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) { + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv6.hash_fld, + pf->gtpu_hash_ctx.ipv6.pkt_hdr); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6); + } + + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv6_udp.hash_fld, + pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp); + } + + if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) { + ice_rem_rss_cfg(hw, vsi->idx, + pf->gtpu_hash_ctx.ipv6_tcp.hash_fld, + pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr); + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp); + } + } + } + + return 0; +} + 
+static int +ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr) +{ + if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) { + if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp); + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_UDP)) { + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp); + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp); + } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) && + (hdr & ICE_FLOW_SEG_HDR_TCP)) { + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp); + } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) { + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4); + } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) { + ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6); + } + } + + return 0; +} + +int +ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, + uint64_t fld, uint32_t hdr) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + int ret; + + ret = ice_rem_rss_cfg(hw, vsi_id, fld, hdr); + if (ret && ret != ICE_ERR_DOES_NOT_EXIST) + PMD_DRV_LOG(ERR, "remove rss cfg failed\n"); + + ret = ice_rem_rss_cfg_post(pf, hdr); + if (ret) + PMD_DRV_LOG(ERR, "remove rss cfg post failed\n"); + + return 0; +} + +int +ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, + uint64_t fld, uint32_t hdr, bool symm) +{ + struct ice_hw *hw = ICE_PF_TO_HW(pf); + int ret; + + ret = ice_add_rss_cfg_pre(pf, hdr); + if (ret) + PMD_DRV_LOG(ERR, "add rss cfg pre failed\n"); + + ret = ice_add_rss_cfg(hw, vsi_id, fld, hdr, symm); + if (ret) + PMD_DRV_LOG(ERR, "add rss cfg failed\n"); + + ret = ice_add_rss_cfg_post(pf, hdr, fld, symm); + if (ret) + PMD_DRV_LOG(ERR, "add rss cfg post failed\n"); + + return 0; +} + static void ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf) { - struct ice_hw *hw = ICE_PF_TO_HW(pf); struct ice_vsi *vsi = pf->main_vsi; int ret; /* Configure RSS for IPv4 with src/dst addr as input set */ if (rss_hf & ETH_RSS_IPV4) { - ret = 
ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) @@ -2448,7 +2731,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) /* Configure RSS for IPv6 with src/dst addr as input set */ if (rss_hf & ETH_RSS_IPV6) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) @@ -2458,7 +2741,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) /* Configure RSS for udp4 with src/dst addr and port as input set */ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4, ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2469,7 +2752,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) /* Configure RSS for udp6 with src/dst addr and port as input set */ if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6, ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2480,7 +2763,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) /* Configure RSS for tcp4 with src/dst addr and port as input set */ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4, ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2491,7 +2774,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) /* Configure RSS for tcp6 with src/dst addr and port as input set */ if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6, 
ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2502,7 +2785,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) /* Configure RSS for sctp4 with src/dst addr and port as input set */ if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2513,7 +2796,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) /* Configure RSS for sctp6 with src/dst addr and port as input set */ if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2523,7 +2806,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) } if (rss_hf & ETH_RSS_IPV4) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2531,7 +2814,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "%s GTPU_IPV4 rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2539,7 +2822,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4 rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4, ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2549,7 +2832,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) } if (rss_hf & ETH_RSS_IPV6) { - ret = ice_add_rss_cfg(hw, 
vsi->idx, ICE_FLOW_HASH_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_GTPU_IP | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2557,7 +2840,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "%s GTPU_IPV6 rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_GTPU_EH | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2565,7 +2848,7 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6 rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6, ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER, 0); @@ -2575,108 +2858,156 @@ static int ice_parse_devargs(struct rte_eth_dev *dev) } if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4, - ICE_FLOW_SEG_HDR_GTPU_IP, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4, + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_IPV4_UDP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4, - ICE_FLOW_SEG_HDR_GTPU_EH, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4, + ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_UDP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4, - ICE_FLOW_SEG_HDR_PPPOE, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4, + ICE_FLOW_SEG_HDR_PPPOE | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_UDP rss flow 
fail %d", __func__, ret); } if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6, - ICE_FLOW_SEG_HDR_GTPU_IP, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6, + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_IPV6_UDP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6, - ICE_FLOW_SEG_HDR_GTPU_EH, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6, + ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_UDP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6, - ICE_FLOW_SEG_HDR_PPPOE, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6, + ICE_FLOW_SEG_HDR_PPPOE | + ICE_FLOW_SEG_HDR_UDP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_UDP rss flow fail %d", __func__, ret); } if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4, - ICE_FLOW_SEG_HDR_GTPU_IP, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4, + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_IPV4_TCP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4, - ICE_FLOW_SEG_HDR_GTPU_EH, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4, + ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_TCP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4, - ICE_FLOW_SEG_HDR_PPPOE, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4, + 
ICE_FLOW_SEG_HDR_PPPOE | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_TCP rss flow fail %d", __func__, ret); } if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6, - ICE_FLOW_SEG_HDR_GTPU_IP, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6, + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_IPV6_TCP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6, - ICE_FLOW_SEG_HDR_GTPU_EH, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6, + ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_TCP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6, - ICE_FLOW_SEG_HDR_PPPOE, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6, + ICE_FLOW_SEG_HDR_PPPOE | + ICE_FLOW_SEG_HDR_TCP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_TCP rss flow fail %d", __func__, ret); } if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_SCTP_IPV4, - ICE_FLOW_SEG_HDR_GTPU_IP, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV4, + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_SCTP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_IPV4_SCTP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_SCTP_IPV4, - ICE_FLOW_SEG_HDR_GTPU_EH, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV4, + ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_SCTP | + ICE_FLOW_SEG_HDR_IPV4 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_SCTP rss flow fail %d", 
__func__, ret); } if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) { - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_SCTP_IPV6, - ICE_FLOW_SEG_HDR_GTPU_IP, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV6, + ICE_FLOW_SEG_HDR_GTPU_IP | + ICE_FLOW_SEG_HDR_SCTP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_IPV6_SCTP rss flow fail %d", __func__, ret); - ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_SCTP_IPV6, - ICE_FLOW_SEG_HDR_GTPU_EH, 0); + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV6, + ICE_FLOW_SEG_HDR_GTPU_EH | + ICE_FLOW_SEG_HDR_SCTP | + ICE_FLOW_SEG_HDR_IPV6 | + ICE_FLOW_SEG_HDR_IPV_OTHER, 0); if (ret) PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_SCTP rss flow fail %d", __func__, ret); diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h index 87984ef..393dfea 100644 --- a/drivers/net/ice/ice_ethdev.h +++ b/drivers/net/ice/ice_ethdev.h @@ -358,6 +358,39 @@ struct ice_fdir_info { struct ice_fdir_counter_pool_container counter; }; +#define ICE_HASH_CFG_VALID(p) \ + ((p)->hash_fld != 0 && (p)->pkt_hdr != 0) + +#define ICE_HASH_CFG_RESET(p) do { \ + (p)->hash_fld = 0; \ + (p)->pkt_hdr = 0; \ +} while (0) + +#define ICE_HASH_CFG_IS_ROTATING(p) \ + ((p)->rotate == true) + +#define ICE_HASH_CFG_ROTATE_START(p) \ + ((p)->rotate = true) + +#define ICE_HASH_CFG_ROTATE_STOP(p) \ + ((p)->rotate = false) + +struct ice_hash_cfg { + uint32_t pkt_hdr; + uint64_t hash_fld; + bool rotate; /* rotate l3 rule after l4 rule. 
*/ + bool symm; +}; + +struct ice_hash_gtpu_ctx { + struct ice_hash_cfg ipv4; + struct ice_hash_cfg ipv6; + struct ice_hash_cfg ipv4_udp; + struct ice_hash_cfg ipv6_udp; + struct ice_hash_cfg ipv4_tcp; + struct ice_hash_cfg ipv6_tcp; +}; + struct ice_pf { struct ice_adapter *adapter; /* The adapter this PF associate to */ struct ice_vsi *main_vsi; /* pointer to main VSI structure */ @@ -381,6 +414,7 @@ struct ice_pf { uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */ uint16_t fdir_qp_offset; struct ice_fdir_info fdir; /* flow director info */ + struct ice_hash_gtpu_ctx gtpu_hash_ctx; uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; struct ice_hw_port_stats stats_offset; @@ -482,6 +516,10 @@ struct ice_vsi * void ice_vsi_enable_queues_intr(struct ice_vsi *vsi); void ice_vsi_disable_queues_intr(struct ice_vsi *vsi); void ice_vsi_queues_bind_intr(struct ice_vsi *vsi); +int ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, + uint64_t hash_fld, uint32_t pkt_hdr, bool symm); +int ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id, + uint64_t hash_fld, uint32_t pkt_hdr); static inline int ice_align_floor(int n) diff --git a/drivers/net/ice/ice_hash.c b/drivers/net/ice/ice_hash.c index fdfaff7..1ea0e70 100644 --- a/drivers/net/ice/ice_hash.c +++ b/drivers/net/ice/ice_hash.c @@ -1271,7 +1271,7 @@ struct ice_hash_match_type ice_hash_type_list[] = { (hash_function == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ); - ret = ice_add_rss_cfg(hw, vsi->idx, + ret = ice_add_rss_cfg_wrap(pf, vsi->idx, filter_ptr->rss_cfg.hashed_flds, filter_ptr->rss_cfg.packet_hdr, filter_ptr->rss_cfg.symm); @@ -1315,7 +1315,7 @@ struct ice_hash_match_type ice_hash_type_list[] = { (1 << VSIQF_HASH_CTL_HASH_SCHEME_S); ICE_WRITE_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id), reg); } else { - ret = ice_rem_rss_cfg(hw, vsi->idx, + ret = ice_rem_rss_cfg_wrap(pf, vsi->idx, filter_ptr->rss_cfg.hashed_flds, 
filter_ptr->rss_cfg.packet_hdr); /* Fixme: Ignore the error if a rule does not exist. -- 1.8.3.1 ^ permalink raw reply [flat|nested] 8+ messages in thread
end of thread, other threads:[~2020-07-28 11:11 UTC | newest] Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed) -- links below jump to the message on this page -- 2020-07-24 2:10 [dpdk-dev] [PATCH] net/ice: fix GTPU down/uplink and extension conflict Simei Su 2020-07-24 7:13 ` Jeff Guo 2020-07-24 14:19 ` Su, Simei 2020-07-24 14:41 ` [dpdk-dev] [PATCH v2] " Simei Su 2020-07-26 3:13 ` [dpdk-dev] [PATCH v3] " Simei Su 2020-07-27 9:38 ` [dpdk-dev] [PATCH v4] " Simei Su 2020-07-28 8:44 ` [dpdk-dev] [PATCH v5] " Simei Su 2020-07-28 11:07 ` [dpdk-dev] [PATCH v6] " Simei Su
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox; as well as URLs for NNTP newsgroup(s).