* [dpdk-dev] [PATCH] net/ice: refactor RSS config for potential bugs
@ 2020-10-12 7:54 Junfeng Guo
2020-10-13 6:25 ` [dpdk-dev] [PATCH v2] " Junfeng Guo
0 siblings, 1 reply; 3+ messages in thread
From: Junfeng Guo @ 2020-10-12 7:54 UTC (permalink / raw)
To: qi.z.zhang, jingjing.wu, beilei.xing; +Cc: dev, junfeng.guo
The current implementation of the PF RSS config wrap function has some
potential bugs related to GTPU, e.g., the same input set for GTPU inner
and non-TUN packets yields different hash values, which should be the same.
The current AVF implementation has a better design and does not have the
above bugs. Thus we reimplement the wrap function to align with the
AVF RSS logic.
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
drivers/net/ice/ice_ethdev.c | 615 +++++++++++++++++++++++------------
drivers/net/ice/ice_ethdev.h | 56 ++--
2 files changed, 440 insertions(+), 231 deletions(-)
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index d8ce09d28..0056da78a 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -2087,14 +2087,7 @@ ice_reset_fxp_resource(struct ice_hw *hw)
static void
ice_rss_ctx_init(struct ice_pf *pf)
{
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);
-
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
-
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
+ memset(&pf->hash_ctx, 0, sizeof(pf->hash_ctx));
}
static uint64_t
@@ -2438,234 +2431,452 @@ ice_dev_uninit(struct rte_eth_dev *dev)
return 0;
}
+static bool
+is_hash_cfg_valid(struct ice_rss_hash_cfg *cfg)
+{
+ return ((cfg->hash_func >= ICE_RSS_HASH_TOEPLITZ &&
+ cfg->hash_func <= ICE_RSS_HASH_JHASH) &&
+ (cfg->hash_flds != 0 && cfg->addl_hdrs != 0)) ?
+ true : false;
+}
+
+static void
+hash_cfg_reset(struct ice_rss_hash_cfg *cfg)
+{
+ cfg->hash_flds = 0;
+ cfg->addl_hdrs = 0;
+ cfg->hash_func = 0;
+}
+
static int
-ice_add_rss_cfg_post(struct ice_pf *pf, uint32_t hdr, uint64_t fld, bool symm)
+ice_hash_moveout(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
{
+ enum ice_status status = ICE_SUCCESS;
struct ice_hw *hw = ICE_PF_TO_HW(pf);
struct ice_vsi *vsi = pf->main_vsi;
- if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) {
- if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
- (hdr & ICE_FLOW_SEG_HDR_UDP)) {
- pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr = hdr;
- pf->gtpu_hash_ctx.ipv4_udp.hash_fld = fld;
- pf->gtpu_hash_ctx.ipv4_udp.symm = symm;
- } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
- (hdr & ICE_FLOW_SEG_HDR_UDP)) {
- pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr = hdr;
- pf->gtpu_hash_ctx.ipv6_udp.hash_fld = fld;
- pf->gtpu_hash_ctx.ipv6_udp.symm = symm;
- } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
- (hdr & ICE_FLOW_SEG_HDR_TCP)) {
- pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr = hdr;
- pf->gtpu_hash_ctx.ipv4_tcp.hash_fld = fld;
- pf->gtpu_hash_ctx.ipv4_tcp.symm = symm;
- } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
- (hdr & ICE_FLOW_SEG_HDR_TCP)) {
- pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr = hdr;
- pf->gtpu_hash_ctx.ipv6_tcp.hash_fld = fld;
- pf->gtpu_hash_ctx.ipv6_tcp.symm = symm;
- } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
- pf->gtpu_hash_ctx.ipv4.pkt_hdr = hdr;
- pf->gtpu_hash_ctx.ipv4.hash_fld = fld;
- pf->gtpu_hash_ctx.ipv4.symm = symm;
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
- } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
- pf->gtpu_hash_ctx.ipv6.pkt_hdr = hdr;
- pf->gtpu_hash_ctx.ipv6.hash_fld = fld;
- pf->gtpu_hash_ctx.ipv6.symm = symm;
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
- }
- }
+ if (!is_hash_cfg_valid(cfg))
+ return -ENOENT;
- if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN |
- ICE_FLOW_SEG_HDR_GTPU_UP)) {
- if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
- (hdr & ICE_FLOW_SEG_HDR_UDP)) {
- if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv4)) {
- ice_add_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv4.hash_fld,
- pf->gtpu_hash_ctx.ipv4.pkt_hdr,
- pf->gtpu_hash_ctx.ipv4.symm);
- ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv4);
- }
- } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
- (hdr & ICE_FLOW_SEG_HDR_UDP)) {
- if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv6)) {
- ice_add_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv6.hash_fld,
- pf->gtpu_hash_ctx.ipv6.pkt_hdr,
- pf->gtpu_hash_ctx.ipv6.symm);
- ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv6);
- }
- } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
- (hdr & ICE_FLOW_SEG_HDR_TCP)) {
- if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv4)) {
- ice_add_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv4.hash_fld,
- pf->gtpu_hash_ctx.ipv4.pkt_hdr,
- pf->gtpu_hash_ctx.ipv4.symm);
- ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv4);
- }
- } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
- (hdr & ICE_FLOW_SEG_HDR_TCP)) {
- if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv6)) {
- ice_add_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv6.hash_fld,
- pf->gtpu_hash_ctx.ipv6.pkt_hdr,
- pf->gtpu_hash_ctx.ipv6.symm);
- ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv6);
- }
- }
+ status = ice_rem_rss_cfg(hw, vsi->idx, cfg->hash_flds,
+ cfg->addl_hdrs);
+ if (status && status != ICE_ERR_DOES_NOT_EXIST) {
+ PMD_DRV_LOG(ERR,
+ "ice_rem_rss_cfg failed for VSI:%d, error:%d\n",
+ vsi->idx, status);
+ return -EBUSY;
}
return 0;
}
static int
-ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr)
+ice_hash_moveback(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
{
+ enum ice_status status = ICE_SUCCESS;
struct ice_hw *hw = ICE_PF_TO_HW(pf);
struct ice_vsi *vsi = pf->main_vsi;
+ bool symm;
- if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN |
- ICE_FLOW_SEG_HDR_GTPU_UP)) {
- if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
- (hdr & ICE_FLOW_SEG_HDR_UDP)) {
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv4_udp.hash_fld,
- pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
- }
+ if (!is_hash_cfg_valid(cfg))
+ return -ENOENT;
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv4.hash_fld,
- pf->gtpu_hash_ctx.ipv4.pkt_hdr);
- ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv4);
- }
- } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
- (hdr & ICE_FLOW_SEG_HDR_UDP)) {
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv6_udp.hash_fld,
- pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
- }
+ symm = (cfg->hash_func == ICE_RSS_HASH_TOEPLITZ_SYMMETRIC) ?
+ true : false;
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv6.hash_fld,
- pf->gtpu_hash_ctx.ipv6.pkt_hdr);
- ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv6);
- }
- } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
- (hdr & ICE_FLOW_SEG_HDR_TCP)) {
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv4_tcp.hash_fld,
- pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
- }
+ status = ice_add_rss_cfg(hw, vsi->idx, cfg->hash_flds,
+ cfg->addl_hdrs, symm);
+ if (status) {
+ PMD_DRV_LOG(ERR,
+ "ice_add_rss_cfg failed for VSI:%d, error:%d\n",
+ vsi->idx, status);
+ return -EBUSY;
+ }
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv4.hash_fld,
- pf->gtpu_hash_ctx.ipv4.pkt_hdr);
- ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv4);
- }
- } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
- (hdr & ICE_FLOW_SEG_HDR_TCP)) {
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv6_tcp.hash_fld,
- pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
- }
+ return 0;
+}
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv6.hash_fld,
- pf->gtpu_hash_ctx.ipv6.pkt_hdr);
- ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv6);
- }
- } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv4.hash_fld,
- pf->gtpu_hash_ctx.ipv4.pkt_hdr);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
- }
+static int
+ice_hash_remove(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
+{
+ int ret;
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv4_udp.hash_fld,
- pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
- }
+ ret = ice_hash_moveout(pf, cfg);
+ if (ret && (ret != -ENOENT))
+ return ret;
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv4_tcp.hash_fld,
- pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
- }
- } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv6.hash_fld,
- pf->gtpu_hash_ctx.ipv6.pkt_hdr);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);
- }
+ hash_cfg_reset(cfg);
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv6_udp.hash_fld,
- pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
- }
+ return 0;
+}
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv6_tcp.hash_fld,
- pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
- }
- }
+static int
+ice_add_rss_cfg_pre_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
+ u8 ctx_idx)
+{
+ int ret;
+
+ switch (ctx_idx) {
+ case ICE_HASH_GTPU_CTX_EH_IP:
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ break;
+ case ICE_HASH_GTPU_CTX_EH_IP_UDP:
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ break;
+ case ICE_HASH_GTPU_CTX_EH_IP_TCP:
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ break;
+ case ICE_HASH_GTPU_CTX_UP_IP:
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ break;
+ case ICE_HASH_GTPU_CTX_UP_IP_UDP:
+ case ICE_HASH_GTPU_CTX_UP_IP_TCP:
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ break;
+ case ICE_HASH_GTPU_CTX_DW_IP:
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ break;
+ case ICE_HASH_GTPU_CTX_DW_IP_UDP:
+ case ICE_HASH_GTPU_CTX_DW_IP_TCP:
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ break;
+ default:
+ break;
}
return 0;
}
+static u8 calc_gtpu_ctx_idx(uint32_t hdr)
+{
+ u8 eh_idx, ip_idx;
+
+ if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH)
+ eh_idx = 0;
+ else if (hdr & ICE_FLOW_SEG_HDR_GTPU_UP)
+ eh_idx = 1;
+ else if (hdr & ICE_FLOW_SEG_HDR_GTPU_DWN)
+ eh_idx = 2;
+ else
+ return ICE_HASH_GTPU_CTX_MAX;
+
+ ip_idx = 0;
+ if (hdr & ICE_FLOW_SEG_HDR_UDP)
+ ip_idx = 1;
+ else if (hdr & ICE_FLOW_SEG_HDR_TCP)
+ ip_idx = 2;
+
+ if (hdr & (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6))
+ return eh_idx * 3 + ip_idx;
+ else
+ return ICE_HASH_GTPU_CTX_MAX;
+}
+
static int
-ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr)
+ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr)
{
- if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) {
- if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
- (hdr & ICE_FLOW_SEG_HDR_UDP)) {
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
- } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
- (hdr & ICE_FLOW_SEG_HDR_UDP)) {
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
- } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
- (hdr & ICE_FLOW_SEG_HDR_TCP)) {
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
- } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
- (hdr & ICE_FLOW_SEG_HDR_TCP)) {
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
- } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
- } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);
- }
+ u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
+
+ if (hdr & ICE_FLOW_SEG_HDR_IPV4)
+ return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu4,
+ gtpu_ctx_idx);
+ else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
+ return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu6,
+ gtpu_ctx_idx);
+
+ return 0;
+}
+
+static int
+ice_add_rss_cfg_post_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
+ u32 hdr, u64 fld, bool symm, u8 ctx_idx)
+{
+ int ret;
+
+ if (ctx_idx < ICE_HASH_GTPU_CTX_MAX) {
+ ctx->ctx[ctx_idx].addl_hdrs = hdr;
+ ctx->ctx[ctx_idx].hash_flds = fld;
+ ctx->ctx[ctx_idx].hash_func = symm;
+ }
+
+ switch (ctx_idx) {
+ case ICE_HASH_GTPU_CTX_EH_IP:
+ break;
+ case ICE_HASH_GTPU_CTX_EH_IP_UDP:
+ ret = ice_hash_moveback(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveback(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveback(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveback(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ break;
+ case ICE_HASH_GTPU_CTX_EH_IP_TCP:
+ ret = ice_hash_moveback(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveback(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveback(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveback(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ break;
+ case ICE_HASH_GTPU_CTX_UP_IP:
+ case ICE_HASH_GTPU_CTX_UP_IP_UDP:
+ case ICE_HASH_GTPU_CTX_UP_IP_TCP:
+ case ICE_HASH_GTPU_CTX_DW_IP:
+ case ICE_HASH_GTPU_CTX_DW_IP_UDP:
+ case ICE_HASH_GTPU_CTX_DW_IP_TCP:
+ ret = ice_hash_moveback(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveback(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveback(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ break;
+ default:
+ break;
}
return 0;
}
+static int
+ice_add_rss_cfg_post(struct ice_pf *pf, uint32_t hdr, uint64_t fld, bool symm)
+{
+ u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
+
+ if (hdr & ICE_FLOW_SEG_HDR_IPV4)
+ return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu4, hdr,
+ fld, symm, gtpu_ctx_idx);
+ else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
+ return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu6, hdr,
+ fld, symm, gtpu_ctx_idx);
+
+ return 0;
+}
+
+static void
+ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr)
+{
+ u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
+
+ if (gtpu_ctx_idx >= ICE_HASH_GTPU_CTX_MAX)
+ return;
+
+ if (hdr & ICE_FLOW_SEG_HDR_IPV4)
+ hash_cfg_reset(&pf->hash_ctx.gtpu4.ctx[gtpu_ctx_idx]);
+ else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
+ hash_cfg_reset(&pf->hash_ctx.gtpu6.ctx[gtpu_ctx_idx]);
+}
+
int
ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
uint64_t fld, uint32_t hdr)
@@ -2677,9 +2888,7 @@ ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
if (ret && ret != ICE_ERR_DOES_NOT_EXIST)
PMD_DRV_LOG(ERR, "remove rss cfg failed\n");
- ret = ice_rem_rss_cfg_post(pf, hdr);
- if (ret)
- PMD_DRV_LOG(ERR, "remove rss cfg post failed\n");
+ ice_rem_rss_cfg_post(pf, hdr);
return 0;
}
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 37b956e2f..978909603 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -365,37 +365,37 @@ struct ice_fdir_info {
struct ice_fdir_counter_pool_container counter;
};
-#define ICE_HASH_CFG_VALID(p) \
- ((p)->hash_fld != 0 && (p)->pkt_hdr != 0)
-
-#define ICE_HASH_CFG_RESET(p) do { \
- (p)->hash_fld = 0; \
- (p)->pkt_hdr = 0; \
-} while (0)
-
-#define ICE_HASH_CFG_IS_ROTATING(p) \
- ((p)->rotate == true)
-
-#define ICE_HASH_CFG_ROTATE_START(p) \
- ((p)->rotate = true)
-
-#define ICE_HASH_CFG_ROTATE_STOP(p) \
- ((p)->rotate = false)
+#define ICE_HASH_GTPU_CTX_EH_IP 0
+#define ICE_HASH_GTPU_CTX_EH_IP_UDP 1
+#define ICE_HASH_GTPU_CTX_EH_IP_TCP 2
+#define ICE_HASH_GTPU_CTX_UP_IP 3
+#define ICE_HASH_GTPU_CTX_UP_IP_UDP 4
+#define ICE_HASH_GTPU_CTX_UP_IP_TCP 5
+#define ICE_HASH_GTPU_CTX_DW_IP 6
+#define ICE_HASH_GTPU_CTX_DW_IP_UDP 7
+#define ICE_HASH_GTPU_CTX_DW_IP_TCP 8
+#define ICE_HASH_GTPU_CTX_MAX 9
+
+enum ice_rss_hash_func {
+ ICE_RSS_HASH_TOEPLITZ = 0,
+ ICE_RSS_HASH_TOEPLITZ_SYMMETRIC = 1,
+ ICE_RSS_HASH_XOR = 2,
+ ICE_RSS_HASH_JHASH = 3,
+};
-struct ice_hash_cfg {
- uint32_t pkt_hdr;
- uint64_t hash_fld;
- bool rotate; /* rotate l3 rule after l4 rule. */
- bool symm;
+struct ice_rss_hash_cfg {
+ u32 addl_hdrs;
+ u64 hash_flds;
+ enum ice_rss_hash_func hash_func;
};
struct ice_hash_gtpu_ctx {
- struct ice_hash_cfg ipv4;
- struct ice_hash_cfg ipv6;
- struct ice_hash_cfg ipv4_udp;
- struct ice_hash_cfg ipv6_udp;
- struct ice_hash_cfg ipv4_tcp;
- struct ice_hash_cfg ipv6_tcp;
+ struct ice_rss_hash_cfg ctx[ICE_HASH_GTPU_CTX_MAX];
+};
+
+struct ice_hash_ctx {
+ struct ice_hash_gtpu_ctx gtpu4;
+ struct ice_hash_gtpu_ctx gtpu6;
};
struct ice_pf {
@@ -421,7 +421,7 @@ struct ice_pf {
uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
uint16_t fdir_qp_offset;
struct ice_fdir_info fdir; /* flow director info */
- struct ice_hash_gtpu_ctx gtpu_hash_ctx;
+ struct ice_hash_ctx hash_ctx;
uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
struct ice_hw_port_stats stats_offset;
--
2.25.1
^ permalink raw reply [flat|nested] 3+ messages in thread
* [dpdk-dev] [PATCH v2] net/ice: refactor RSS config for potential bugs
2020-10-12 7:54 [dpdk-dev] [PATCH] net/ice: refactor RSS config for potential bugs Junfeng Guo
@ 2020-10-13 6:25 ` Junfeng Guo
2020-10-13 7:51 ` Zhang, Qi Z
0 siblings, 1 reply; 3+ messages in thread
From: Junfeng Guo @ 2020-10-13 6:25 UTC (permalink / raw)
To: qi.z.zhang, jingjing.wu, beilei.xing; +Cc: dev, junfeng.guo
The current implementation of the PF RSS config wrap function has some
potential bugs related to GTPU, e.g., the same input set for GTPU inner
and non-TUN packets yields different hash values, which should be the same.
Thus, we use extra pre- and post-processing to re-configure GTPU rules.
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
drivers/net/ice/ice_ethdev.c | 615 +++++++++++++++++++++++------------
drivers/net/ice/ice_ethdev.h | 56 ++--
2 files changed, 440 insertions(+), 231 deletions(-)
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index d8ce09d28..0056da78a 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -2087,14 +2087,7 @@ ice_reset_fxp_resource(struct ice_hw *hw)
static void
ice_rss_ctx_init(struct ice_pf *pf)
{
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);
-
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
-
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
+ memset(&pf->hash_ctx, 0, sizeof(pf->hash_ctx));
}
static uint64_t
@@ -2438,234 +2431,452 @@ ice_dev_uninit(struct rte_eth_dev *dev)
return 0;
}
+static bool
+is_hash_cfg_valid(struct ice_rss_hash_cfg *cfg)
+{
+ return ((cfg->hash_func >= ICE_RSS_HASH_TOEPLITZ &&
+ cfg->hash_func <= ICE_RSS_HASH_JHASH) &&
+ (cfg->hash_flds != 0 && cfg->addl_hdrs != 0)) ?
+ true : false;
+}
+
+static void
+hash_cfg_reset(struct ice_rss_hash_cfg *cfg)
+{
+ cfg->hash_flds = 0;
+ cfg->addl_hdrs = 0;
+ cfg->hash_func = 0;
+}
+
static int
-ice_add_rss_cfg_post(struct ice_pf *pf, uint32_t hdr, uint64_t fld, bool symm)
+ice_hash_moveout(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
{
+ enum ice_status status = ICE_SUCCESS;
struct ice_hw *hw = ICE_PF_TO_HW(pf);
struct ice_vsi *vsi = pf->main_vsi;
- if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) {
- if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
- (hdr & ICE_FLOW_SEG_HDR_UDP)) {
- pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr = hdr;
- pf->gtpu_hash_ctx.ipv4_udp.hash_fld = fld;
- pf->gtpu_hash_ctx.ipv4_udp.symm = symm;
- } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
- (hdr & ICE_FLOW_SEG_HDR_UDP)) {
- pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr = hdr;
- pf->gtpu_hash_ctx.ipv6_udp.hash_fld = fld;
- pf->gtpu_hash_ctx.ipv6_udp.symm = symm;
- } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
- (hdr & ICE_FLOW_SEG_HDR_TCP)) {
- pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr = hdr;
- pf->gtpu_hash_ctx.ipv4_tcp.hash_fld = fld;
- pf->gtpu_hash_ctx.ipv4_tcp.symm = symm;
- } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
- (hdr & ICE_FLOW_SEG_HDR_TCP)) {
- pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr = hdr;
- pf->gtpu_hash_ctx.ipv6_tcp.hash_fld = fld;
- pf->gtpu_hash_ctx.ipv6_tcp.symm = symm;
- } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
- pf->gtpu_hash_ctx.ipv4.pkt_hdr = hdr;
- pf->gtpu_hash_ctx.ipv4.hash_fld = fld;
- pf->gtpu_hash_ctx.ipv4.symm = symm;
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
- } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
- pf->gtpu_hash_ctx.ipv6.pkt_hdr = hdr;
- pf->gtpu_hash_ctx.ipv6.hash_fld = fld;
- pf->gtpu_hash_ctx.ipv6.symm = symm;
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
- }
- }
+ if (!is_hash_cfg_valid(cfg))
+ return -ENOENT;
- if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN |
- ICE_FLOW_SEG_HDR_GTPU_UP)) {
- if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
- (hdr & ICE_FLOW_SEG_HDR_UDP)) {
- if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv4)) {
- ice_add_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv4.hash_fld,
- pf->gtpu_hash_ctx.ipv4.pkt_hdr,
- pf->gtpu_hash_ctx.ipv4.symm);
- ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv4);
- }
- } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
- (hdr & ICE_FLOW_SEG_HDR_UDP)) {
- if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv6)) {
- ice_add_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv6.hash_fld,
- pf->gtpu_hash_ctx.ipv6.pkt_hdr,
- pf->gtpu_hash_ctx.ipv6.symm);
- ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv6);
- }
- } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
- (hdr & ICE_FLOW_SEG_HDR_TCP)) {
- if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv4)) {
- ice_add_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv4.hash_fld,
- pf->gtpu_hash_ctx.ipv4.pkt_hdr,
- pf->gtpu_hash_ctx.ipv4.symm);
- ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv4);
- }
- } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
- (hdr & ICE_FLOW_SEG_HDR_TCP)) {
- if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv6)) {
- ice_add_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv6.hash_fld,
- pf->gtpu_hash_ctx.ipv6.pkt_hdr,
- pf->gtpu_hash_ctx.ipv6.symm);
- ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv6);
- }
- }
+ status = ice_rem_rss_cfg(hw, vsi->idx, cfg->hash_flds,
+ cfg->addl_hdrs);
+ if (status && status != ICE_ERR_DOES_NOT_EXIST) {
+ PMD_DRV_LOG(ERR,
+ "ice_rem_rss_cfg failed for VSI:%d, error:%d\n",
+ vsi->idx, status);
+ return -EBUSY;
}
return 0;
}
static int
-ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr)
+ice_hash_moveback(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
{
+ enum ice_status status = ICE_SUCCESS;
struct ice_hw *hw = ICE_PF_TO_HW(pf);
struct ice_vsi *vsi = pf->main_vsi;
+ bool symm;
- if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN |
- ICE_FLOW_SEG_HDR_GTPU_UP)) {
- if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
- (hdr & ICE_FLOW_SEG_HDR_UDP)) {
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv4_udp.hash_fld,
- pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
- }
+ if (!is_hash_cfg_valid(cfg))
+ return -ENOENT;
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv4.hash_fld,
- pf->gtpu_hash_ctx.ipv4.pkt_hdr);
- ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv4);
- }
- } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
- (hdr & ICE_FLOW_SEG_HDR_UDP)) {
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv6_udp.hash_fld,
- pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
- }
+ symm = (cfg->hash_func == ICE_RSS_HASH_TOEPLITZ_SYMMETRIC) ?
+ true : false;
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv6.hash_fld,
- pf->gtpu_hash_ctx.ipv6.pkt_hdr);
- ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv6);
- }
- } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
- (hdr & ICE_FLOW_SEG_HDR_TCP)) {
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv4_tcp.hash_fld,
- pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
- }
+ status = ice_add_rss_cfg(hw, vsi->idx, cfg->hash_flds,
+ cfg->addl_hdrs, symm);
+ if (status) {
+ PMD_DRV_LOG(ERR,
+ "ice_add_rss_cfg failed for VSI:%d, error:%d\n",
+ vsi->idx, status);
+ return -EBUSY;
+ }
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv4.hash_fld,
- pf->gtpu_hash_ctx.ipv4.pkt_hdr);
- ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv4);
- }
- } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
- (hdr & ICE_FLOW_SEG_HDR_TCP)) {
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv6_tcp.hash_fld,
- pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
- }
+ return 0;
+}
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv6.hash_fld,
- pf->gtpu_hash_ctx.ipv6.pkt_hdr);
- ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv6);
- }
- } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv4.hash_fld,
- pf->gtpu_hash_ctx.ipv4.pkt_hdr);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
- }
+static int
+ice_hash_remove(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
+{
+ int ret;
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv4_udp.hash_fld,
- pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
- }
+ ret = ice_hash_moveout(pf, cfg);
+ if (ret && (ret != -ENOENT))
+ return ret;
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv4_tcp.hash_fld,
- pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
- }
- } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv6.hash_fld,
- pf->gtpu_hash_ctx.ipv6.pkt_hdr);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);
- }
+ hash_cfg_reset(cfg);
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv6_udp.hash_fld,
- pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
- }
+ return 0;
+}
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv6_tcp.hash_fld,
- pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
- }
- }
+static int
+ice_add_rss_cfg_pre_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
+ u8 ctx_idx)
+{
+ int ret;
+
+ switch (ctx_idx) {
+ case ICE_HASH_GTPU_CTX_EH_IP:
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ break;
+ case ICE_HASH_GTPU_CTX_EH_IP_UDP:
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ break;
+ case ICE_HASH_GTPU_CTX_EH_IP_TCP:
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ break;
+ case ICE_HASH_GTPU_CTX_UP_IP:
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ break;
+ case ICE_HASH_GTPU_CTX_UP_IP_UDP:
+ case ICE_HASH_GTPU_CTX_UP_IP_TCP:
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ break;
+ case ICE_HASH_GTPU_CTX_DW_IP:
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_remove(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ break;
+ case ICE_HASH_GTPU_CTX_DW_IP_UDP:
+ case ICE_HASH_GTPU_CTX_DW_IP_TCP:
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveout(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ break;
+ default:
+ break;
}
return 0;
}
+static u8 calc_gtpu_ctx_idx(uint32_t hdr)
+{
+ u8 eh_idx, ip_idx;
+
+ if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH)
+ eh_idx = 0;
+ else if (hdr & ICE_FLOW_SEG_HDR_GTPU_UP)
+ eh_idx = 1;
+ else if (hdr & ICE_FLOW_SEG_HDR_GTPU_DWN)
+ eh_idx = 2;
+ else
+ return ICE_HASH_GTPU_CTX_MAX;
+
+ ip_idx = 0;
+ if (hdr & ICE_FLOW_SEG_HDR_UDP)
+ ip_idx = 1;
+ else if (hdr & ICE_FLOW_SEG_HDR_TCP)
+ ip_idx = 2;
+
+ if (hdr & (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6))
+ return eh_idx * 3 + ip_idx;
+ else
+ return ICE_HASH_GTPU_CTX_MAX;
+}
+
static int
-ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr)
+ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr)
{
- if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) {
- if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
- (hdr & ICE_FLOW_SEG_HDR_UDP)) {
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
- } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
- (hdr & ICE_FLOW_SEG_HDR_UDP)) {
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
- } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
- (hdr & ICE_FLOW_SEG_HDR_TCP)) {
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
- } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
- (hdr & ICE_FLOW_SEG_HDR_TCP)) {
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
- } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
- } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);
- }
+ u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
+
+ if (hdr & ICE_FLOW_SEG_HDR_IPV4)
+ return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu4,
+ gtpu_ctx_idx);
+ else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
+ return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu6,
+ gtpu_ctx_idx);
+
+ return 0;
+}
+
+static int
+ice_add_rss_cfg_post_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
+ u32 hdr, u64 fld, bool symm, u8 ctx_idx)
+{
+ int ret;
+
+ if (ctx_idx < ICE_HASH_GTPU_CTX_MAX) {
+ ctx->ctx[ctx_idx].addl_hdrs = hdr;
+ ctx->ctx[ctx_idx].hash_flds = fld;
+ ctx->ctx[ctx_idx].hash_func = symm;
+ }
+
+ switch (ctx_idx) {
+ case ICE_HASH_GTPU_CTX_EH_IP:
+ break;
+ case ICE_HASH_GTPU_CTX_EH_IP_UDP:
+ ret = ice_hash_moveback(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveback(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveback(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveback(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ break;
+ case ICE_HASH_GTPU_CTX_EH_IP_TCP:
+ ret = ice_hash_moveback(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveback(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveback(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveback(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ break;
+ case ICE_HASH_GTPU_CTX_UP_IP:
+ case ICE_HASH_GTPU_CTX_UP_IP_UDP:
+ case ICE_HASH_GTPU_CTX_UP_IP_TCP:
+ case ICE_HASH_GTPU_CTX_DW_IP:
+ case ICE_HASH_GTPU_CTX_DW_IP_UDP:
+ case ICE_HASH_GTPU_CTX_DW_IP_TCP:
+ ret = ice_hash_moveback(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveback(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ ret = ice_hash_moveback(pf,
+ &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
+ if (ret && (ret != -ENOENT))
+ return ret;
+
+ break;
+ default:
+ break;
}
return 0;
}
+static int
+ice_add_rss_cfg_post(struct ice_pf *pf, uint32_t hdr, uint64_t fld, bool symm)
+{
+ u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
+
+ if (hdr & ICE_FLOW_SEG_HDR_IPV4)
+ return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu4, hdr,
+ fld, symm, gtpu_ctx_idx);
+ else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
+ return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu6, hdr,
+ fld, symm, gtpu_ctx_idx);
+
+ return 0;
+}
+
+static void
+ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr)
+{
+ u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
+
+ if (gtpu_ctx_idx >= ICE_HASH_GTPU_CTX_MAX)
+ return;
+
+ if (hdr & ICE_FLOW_SEG_HDR_IPV4)
+ hash_cfg_reset(&pf->hash_ctx.gtpu4.ctx[gtpu_ctx_idx]);
+ else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
+ hash_cfg_reset(&pf->hash_ctx.gtpu6.ctx[gtpu_ctx_idx]);
+}
+
int
ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
uint64_t fld, uint32_t hdr)
@@ -2677,9 +2888,7 @@ ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
if (ret && ret != ICE_ERR_DOES_NOT_EXIST)
PMD_DRV_LOG(ERR, "remove rss cfg failed\n");
- ret = ice_rem_rss_cfg_post(pf, hdr);
- if (ret)
- PMD_DRV_LOG(ERR, "remove rss cfg post failed\n");
+ ice_rem_rss_cfg_post(pf, hdr);
return 0;
}
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index 37b956e2f..978909603 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -365,37 +365,37 @@ struct ice_fdir_info {
struct ice_fdir_counter_pool_container counter;
};
-#define ICE_HASH_CFG_VALID(p) \
- ((p)->hash_fld != 0 && (p)->pkt_hdr != 0)
-
-#define ICE_HASH_CFG_RESET(p) do { \
- (p)->hash_fld = 0; \
- (p)->pkt_hdr = 0; \
-} while (0)
-
-#define ICE_HASH_CFG_IS_ROTATING(p) \
- ((p)->rotate == true)
-
-#define ICE_HASH_CFG_ROTATE_START(p) \
- ((p)->rotate = true)
-
-#define ICE_HASH_CFG_ROTATE_STOP(p) \
- ((p)->rotate = false)
+#define ICE_HASH_GTPU_CTX_EH_IP 0
+#define ICE_HASH_GTPU_CTX_EH_IP_UDP 1
+#define ICE_HASH_GTPU_CTX_EH_IP_TCP 2
+#define ICE_HASH_GTPU_CTX_UP_IP 3
+#define ICE_HASH_GTPU_CTX_UP_IP_UDP 4
+#define ICE_HASH_GTPU_CTX_UP_IP_TCP 5
+#define ICE_HASH_GTPU_CTX_DW_IP 6
+#define ICE_HASH_GTPU_CTX_DW_IP_UDP 7
+#define ICE_HASH_GTPU_CTX_DW_IP_TCP 8
+#define ICE_HASH_GTPU_CTX_MAX 9
+
+enum ice_rss_hash_func {
+ ICE_RSS_HASH_TOEPLITZ = 0,
+ ICE_RSS_HASH_TOEPLITZ_SYMMETRIC = 1,
+ ICE_RSS_HASH_XOR = 2,
+ ICE_RSS_HASH_JHASH = 3,
+};
-struct ice_hash_cfg {
- uint32_t pkt_hdr;
- uint64_t hash_fld;
- bool rotate; /* rotate l3 rule after l4 rule. */
- bool symm;
+struct ice_rss_hash_cfg {
+ u32 addl_hdrs;
+ u64 hash_flds;
+ enum ice_rss_hash_func hash_func;
};
struct ice_hash_gtpu_ctx {
- struct ice_hash_cfg ipv4;
- struct ice_hash_cfg ipv6;
- struct ice_hash_cfg ipv4_udp;
- struct ice_hash_cfg ipv6_udp;
- struct ice_hash_cfg ipv4_tcp;
- struct ice_hash_cfg ipv6_tcp;
+ struct ice_rss_hash_cfg ctx[ICE_HASH_GTPU_CTX_MAX];
+};
+
+struct ice_hash_ctx {
+ struct ice_hash_gtpu_ctx gtpu4;
+ struct ice_hash_gtpu_ctx gtpu6;
};
struct ice_pf {
@@ -421,7 +421,7 @@ struct ice_pf {
uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
uint16_t fdir_qp_offset;
struct ice_fdir_info fdir; /* flow director info */
- struct ice_hash_gtpu_ctx gtpu_hash_ctx;
+ struct ice_hash_ctx hash_ctx;
uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
struct ice_hw_port_stats stats_offset;
--
2.25.1
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [dpdk-dev] [PATCH v2] net/ice: refactor RSS config for potential bugs
2020-10-13 6:25 ` [dpdk-dev] [PATCH v2] " Junfeng Guo
@ 2020-10-13 7:51 ` Zhang, Qi Z
0 siblings, 0 replies; 3+ messages in thread
From: Zhang, Qi Z @ 2020-10-13 7:51 UTC (permalink / raw)
To: Guo, Junfeng, Wu, Jingjing, Xing, Beilei; +Cc: dev
> -----Original Message-----
> From: Guo, Junfeng <junfeng.guo@intel.com>
> Sent: Tuesday, October 13, 2020 2:26 PM
> To: Zhang, Qi Z <qi.z.zhang@intel.com>; Wu, Jingjing <jingjing.wu@intel.com>;
> Xing, Beilei <beilei.xing@intel.com>
> Cc: dev@dpdk.org; Guo, Junfeng <junfeng.guo@intel.com>
> Subject: [PATCH v2] net/ice: refactor RSS config for potential bugs
Renamed to refactor RSS config wrap and fix potential bugs
>
> Current implementation for PF RSS config wrap function has some potential
> bugs about GTPU, e.g., same input set for GTPU inner and non-TUN have
> different hash values, which should be same. Thus, we use extra pre and post
> processing to re-config GTPU rules.
>
> Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
Applied to dpdk-next-net-intel with the fix line added
Added fix line: Fixes: 185fe122f489 ("net/ice: fix GTPU down/uplink and extension conflict")
Thanks
Qi
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2020-10-13 7:51 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-10-12 7:54 [dpdk-dev] [PATCH] net/ice: refactor RSS config for potential bugs Junfeng Guo
2020-10-13 6:25 ` [dpdk-dev] [PATCH v2] " Junfeng Guo
2020-10-13 7:51 ` Zhang, Qi Z
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).